ngram: one list of overlapping word n-grams per source file (list lengths range from 0 to 82k)
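Each sample row below is one such list: overlapping shingles of (apparently) ten whitespace-separated tokens taken from a single Python source file, displayed here in no particular order. As an illustration only, a list like these could be generated roughly as in the sketch below; the helper name word_ngrams, the window size of 10, and the plain whitespace tokenization are assumptions inferred from the rows, not the dataset's actual build code.

# Illustration only (assumption): build overlapping word n-grams like the rows below.
def word_ngrams(text: str, n: int = 10) -> list[str]:
    tokens = text.split()  # whitespace-split tokens of the file's text
    # every window of n consecutive tokens becomes one list entry
    return [" ".join(tokens[i:i + n]) for i in range(max(0, len(tokens) - n + 1))]


# Usage example with a short window so the output stays readable
print(word_ngrams("import pytest from privacy_evaluator.attacks.sample_attack import Sample_Attack", n=4))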
[ "error is thrown when calling the function, can be removed", "calling the function, can be removed in the future \"\"\"", "be removed in the future \"\"\" def test_sample_attack(): test =", "in the future \"\"\" def test_sample_attack(): test = Sample_Attack(0, 0,", "pytest from privacy_evaluator.attacks.sample_attack import Sample_Attack \"\"\" This test only test", "test only test if no error is thrown when calling", "removed in the future \"\"\" def test_sample_attack(): test = Sample_Attack(0,", "no error is thrown when calling the function, can be", "can be removed in the future \"\"\" def test_sample_attack(): test", "the function, can be removed in the future \"\"\" def", "is thrown when calling the function, can be removed in", "\"\"\" This test only test if no error is thrown", "from privacy_evaluator.attacks.sample_attack import Sample_Attack \"\"\" This test only test if", "when calling the function, can be removed in the future", "future \"\"\" def test_sample_attack(): test = Sample_Attack(0, 0, 0) test.perform_attack()", "Sample_Attack \"\"\" This test only test if no error is", "function, can be removed in the future \"\"\" def test_sample_attack():", "if no error is thrown when calling the function, can", "the future \"\"\" def test_sample_attack(): test = Sample_Attack(0, 0, 0)", "thrown when calling the function, can be removed in the", "privacy_evaluator.attacks.sample_attack import Sample_Attack \"\"\" This test only test if no", "import Sample_Attack \"\"\" This test only test if no error", "This test only test if no error is thrown when", "test if no error is thrown when calling the function,", "import pytest from privacy_evaluator.attacks.sample_attack import Sample_Attack \"\"\" This test only", "only test if no error is thrown when calling the" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "'README.md']}, description='Client for the Presto distributed SQL Engine', long_description=textwrap.dedent(\"\"\" Client", "and # limitations under the License. import ast import re", "distributed SQL Engine', long_description=textwrap.dedent(\"\"\" Client for Presto (https://prestodb.io), a distributed", "limitations under the License. import ast import re from setuptools", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "permissions and # limitations under the License. import ast import", "ast import re from setuptools import setup import textwrap _version_re", "interactive and batch big data processing. Provides a low-level client", "with open('prestodb/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup(", "'Programming Language :: Python', 'Programming Language :: Python :: 2.7',", "and batch big data processing. Provides a low-level client and", "Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming", ":: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language", ":: CPython', 'Programming Language :: Python :: Implementation :: PyPy',", "3', 'Programming Language :: Python :: 3.5', 'Programming Language ::", "distributed under the License is distributed on an \"AS IS\"", "implementation. \"\"\"), license='Apache 2.0', classifiers=[ 'Development Status :: 4 -", "'Operating System :: MacOS :: MacOS X', 'Operating System ::", "f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE',", "(https://prestodb.io), a distributed SQL engine for interactive and batch big", "Audience :: Developers', 'License :: OSI Approved :: Apache Software", "OSI Approved :: Apache Software License', 'Operating System :: MacOS", "the specific language governing permissions and # limitations under the", "open('prestodb/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client',", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", ":: Apache Software License', 'Operating System :: MacOS :: MacOS", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "2.0 implementation. \"\"\"), license='Apache 2.0', classifiers=[ 'Development Status :: 4", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating System", ":: Python', 'Programming Language :: Python :: 2.7', 'Programming Language", "3.5', 'Programming Language :: Python :: 3.6', 'Programming Language ::", "under the License. 
import ast import re from setuptools import", "import textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb') as f:", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>',", "Language :: Python :: 3', 'Programming Language :: Python ::", "not use this file except in compliance with the License.", ":: 3.6', 'Programming Language :: Python :: Implementation :: CPython',", ":: MacOS X', 'Operating System :: POSIX', 'Operating System ::", "MacOS X', 'Operating System :: POSIX', 'Operating System :: Microsoft", "System :: POSIX', 'Operating System :: Microsoft :: Windows', 'Programming", "'Topic :: Database :: Front-Ends', ], install_requires=[ 'click', 'future', 'ipaddress',", "Engine', long_description=textwrap.dedent(\"\"\" Client for Presto (https://prestodb.io), a distributed SQL engine", ":: Python :: 2.7', 'Programming Language :: Python :: 3',", "engine for interactive and batch big data processing. Provides a", "writing, software # distributed under the License is distributed on", ":: Python :: 3.5', 'Programming Language :: Python :: 3.6',", "in writing, software # distributed under the License is distributed", "package_data={'': ['LICENSE', 'README.md']}, description='Client for the Presto distributed SQL Engine',", "Python :: Implementation :: PyPy', 'Topic :: Database :: Front-Ends',", "you may not use this file except in compliance with", "from setuptools import setup import textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)') with", "= str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'],", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "language governing permissions and # limitations under the License. import", "'License :: OSI Approved :: Apache Software License', 'Operating System", "Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming", "use this file except in compliance with the License. #", "a distributed SQL engine for interactive and batch big data", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", ":: Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language", "PyPy', 'Topic :: Database :: Front-Ends', ], install_requires=[ 'click', 'future',", "Software License', 'Operating System :: MacOS :: MacOS X', 'Operating", "Language :: Python :: 3.6', 'Programming Language :: Python ::", "CONDITIONS OF ANY KIND, either express or implied. # See", ":: OSI Approved :: Apache Software License', 'Operating System ::", "_version_re = re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb') as f: version =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "license='Apache 2.0', classifiers=[ 'Development Status :: 4 - Beta', 'Intended", "License. 
# You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", ":: 2.7', 'Programming Language :: Python :: 3', 'Programming Language", "# You may obtain a copy of the License at", "- Beta', 'Intended Audience :: Developers', 'License :: OSI Approved", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "Provides a low-level client and a DBAPI 2.0 implementation. \"\"\"),", "Language :: Python :: 2.7', 'Programming Language :: Python ::", ":: Python :: 3.6', 'Programming Language :: Python :: Implementation", "under the License is distributed on an \"AS IS\" BASIS,", "Python', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "'Programming Language :: Python :: Implementation :: CPython', 'Programming Language", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "2.7', 'Programming Language :: Python :: 3', 'Programming Language ::", "Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "for Presto (https://prestodb.io), a distributed SQL engine for interactive and", ":: Front-Ends', ], install_requires=[ 'click', 'future', 'ipaddress', 'requests', 'requests_kerberos', 'six',", "System :: MacOS :: MacOS X', 'Operating System :: POSIX',", "Language :: Python :: 3.5', 'Programming Language :: Python ::", "the Presto distributed SQL Engine', long_description=textwrap.dedent(\"\"\" Client for Presto (https://prestodb.io),", "textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb') as f: version", "for interactive and batch big data processing. Provides a low-level", "X', 'Operating System :: POSIX', 'Operating System :: Microsoft ::", "a DBAPI 2.0 implementation. \"\"\"), license='Apache 2.0', classifiers=[ 'Development Status", "['LICENSE', 'README.md']}, description='Client for the Presto distributed SQL Engine', long_description=textwrap.dedent(\"\"\"", "License', 'Operating System :: MacOS :: MacOS X', 'Operating System", "Microsoft :: Windows', 'Programming Language :: Python', 'Programming Language ::", "'Intended Audience :: Developers', 'License :: OSI Approved :: Apache", "setup import textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb') as", "SQL engine for interactive and batch big data processing. 
Provides", "'Development Status :: 4 - Beta', 'Intended Audience :: Developers',", "'Programming Language :: Python :: 3.6', 'Programming Language :: Python", "import re from setuptools import setup import textwrap _version_re =", "Client for Presto (https://prestodb.io), a distributed SQL engine for interactive", ":: Python :: Implementation :: PyPy', 'Topic :: Database ::", ":: 3', 'Programming Language :: Python :: 3.5', 'Programming Language", "import ast import re from setuptools import setup import textwrap", "the License for the specific language governing permissions and #", "Language :: Python :: Implementation :: CPython', 'Programming Language ::", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python", "# you may not use this file except in compliance", "Beta', 'Intended Audience :: Developers', 'License :: OSI Approved ::", "either express or implied. # See the License for the", "System :: Microsoft :: Windows', 'Programming Language :: Python', 'Programming", "OR CONDITIONS OF ANY KIND, either express or implied. #", "'Programming Language :: Python :: 3', 'Programming Language :: Python", "POSIX', 'Operating System :: Microsoft :: Windows', 'Programming Language ::", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>',", "Presto distributed SQL Engine', long_description=textwrap.dedent(\"\"\" Client for Presto (https://prestodb.io), a", "the License is distributed on an \"AS IS\" BASIS, #", "Front-Ends', ], install_requires=[ 'click', 'future', 'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing',", "in compliance with the License. 
# You may obtain a", "url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client for the Presto distributed", "software # distributed under the License is distributed on an", ":: Database :: Front-Ends', ], install_requires=[ 'click', 'future', 'ipaddress', 'requests',", "'requests', 'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[ 'httpretty', 'pytest', 'pytest-runner', ]}", "2.0', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience", "version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client',", "# # Unless required by applicable law or agreed to", "Windows', 'Programming Language :: Python', 'Programming Language :: Python ::", "for the Presto distributed SQL Engine', long_description=textwrap.dedent(\"\"\" Client for Presto", ":: Implementation :: PyPy', 'Topic :: Database :: Front-Ends', ],", "Python :: 3.6', 'Programming Language :: Python :: Implementation ::", "'click', 'future', 'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[ 'httpretty',", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "4 - Beta', 'Intended Audience :: Developers', 'License :: OSI", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']},", "Version 2.0 (the \"License\"); # you may not use this", "'future', 'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[ 'httpretty', 'pytest',", "batch big data processing. Provides a low-level client and a", "processing. Provides a low-level client and a DBAPI 2.0 implementation.", "law or agreed to in writing, software # distributed under", "author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client for the", "import setup import textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb')", ":: Developers', 'License :: OSI Approved :: Apache Software License',", "\"\"\"), license='Apache 2.0', classifiers=[ 'Development Status :: 4 - Beta',", "'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[ 'httpretty', 'pytest', 'pytest-runner',", "], install_requires=[ 'click', 'future', 'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing', ],", "Python :: Implementation :: CPython', 'Programming Language :: Python ::", "Database :: Front-Ends', ], install_requires=[ 'click', 'future', 'ipaddress', 'requests', 'requests_kerberos',", "setuptools import setup import textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py',", "implied. # See the License for the specific language governing", "License. 
import ast import re from setuptools import setup import", "'Programming Language :: Python :: Implementation :: PyPy', 'Topic ::", "under the Apache License, Version 2.0 (the \"License\"); # you", "version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client for the Presto", "'Operating System :: Microsoft :: Windows', 'Programming Language :: Python',", "\"License\"); # you may not use this file except in", "name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", ":: Implementation :: CPython', 'Programming Language :: Python :: Implementation", "'Programming Language :: Python :: 3.5', 'Programming Language :: Python", "DBAPI 2.0 implementation. \"\"\"), license='Apache 2.0', classifiers=[ 'Development Status ::", "and a DBAPI 2.0 implementation. \"\"\"), license='Apache 2.0', classifiers=[ 'Development", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1)))", "f: version = str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version,", "SQL Engine', long_description=textwrap.dedent(\"\"\" Client for Presto (https://prestodb.io), a distributed SQL", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "distributed SQL engine for interactive and batch big data processing.", "author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client for", "= re.compile(r'__version__\\s+=\\s+(.*)') with open('prestodb/__init__.py', 'rb') as f: version = str(ast.literal_eval(_version_re.search(", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "description='Client for the Presto distributed SQL Engine', long_description=textwrap.dedent(\"\"\" Client for", "client and a DBAPI 2.0 implementation. \"\"\"), license='Apache 2.0', classifiers=[", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", ":: MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating", "to in writing, software # distributed under the License is", ":: POSIX', 'Operating System :: Microsoft :: Windows', 'Programming Language", "3.6', 'Programming Language :: Python :: Implementation :: CPython', 'Programming", "'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[ 'httpretty', 'pytest', 'pytest-runner', ]} )", "a low-level client and a DBAPI 2.0 implementation. 
\"\"\"), license='Apache", "classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience ::", "Developers', 'License :: OSI Approved :: Apache Software License', 'Operating", ":: PyPy', 'Topic :: Database :: Front-Ends', ], install_requires=[ 'click',", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "Language :: Python :: Implementation :: PyPy', 'Topic :: Database", "Approved :: Apache Software License', 'Operating System :: MacOS ::", ":: Python :: Implementation :: CPython', 'Programming Language :: Python", "Status :: 4 - Beta', 'Intended Audience :: Developers', 'License", "Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming", "You may obtain a copy of the License at #", "Implementation :: CPython', 'Programming Language :: Python :: Implementation ::", ":: Windows', 'Programming Language :: Python', 'Programming Language :: Python", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "Implementation :: PyPy', 'Topic :: Database :: Front-Ends', ], install_requires=[", ":: 4 - Beta', 'Intended Audience :: Developers', 'License ::", "required by applicable law or agreed to in writing, software", "'Operating System :: POSIX', 'Operating System :: Microsoft :: Windows',", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "governing permissions and # limitations under the License. import ast", "packages=['prestodb'], package_data={'': ['LICENSE', 'README.md']}, description='Client for the Presto distributed SQL", "with the License. # You may obtain a copy of", "the License. import ast import re from setuptools import setup", "this file except in compliance with the License. # You", "long_description=textwrap.dedent(\"\"\" Client for Presto (https://prestodb.io), a distributed SQL engine for", "data processing. Provides a low-level client and a DBAPI 2.0", "# limitations under the License. import ast import re from", "big data processing. Provides a low-level client and a DBAPI", "the Apache License, Version 2.0 (the \"License\"); # you may", "Presto (https://prestodb.io), a distributed SQL engine for interactive and batch", "low-level client and a DBAPI 2.0 implementation. \"\"\"), license='Apache 2.0',", "Apache Software License', 'Operating System :: MacOS :: MacOS X',", ":: Python :: 3', 'Programming Language :: Python :: 3.5',", "install_requires=[ 'click', 'future', 'ipaddress', 'requests', 'requests_kerberos', 'six', 'typing', ], extras_require={'tests':[", "CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic", "str(ast.literal_eval(_version_re.search( f.read().decode('utf-8')).group(1))) setup( name='presto-python-client', author='<NAME>', author_email='<EMAIL>', version=version, url='https://github.com/prestodb/presto-python-client', packages=['prestodb'], package_data={'':", "re from setuptools import setup import textwrap _version_re = re.compile(r'__version__\\s+=\\s+(.*)')" ]
[ "\"Linear Algebra\", \"Cryptography\"] weightage = [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title(\"Mathematics", "\"Discrete Mathematics\", \"Adv Engineering Mathematics\", \"Linear Algebra\", \"Cryptography\"] weightage =", "Engineering Mathematics\", \"Linear Algebra\", \"Cryptography\"] weightage = [250,900,850,1200,290,345] seperator =", "= [\"Probability\", \"Calculas\", \"Discrete Mathematics\", \"Adv Engineering Mathematics\", \"Linear Algebra\",", "matplotlib.pyplot as graph subject = [\"Probability\", \"Calculas\", \"Discrete Mathematics\", \"Adv", "Mathematics\", \"Adv Engineering Mathematics\", \"Linear Algebra\", \"Cryptography\"] weightage = [250,900,850,1200,290,345]", "graph subject = [\"Probability\", \"Calculas\", \"Discrete Mathematics\", \"Adv Engineering Mathematics\",", "Algebra\", \"Cryptography\"] weightage = [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title(\"Mathematics Topic", "<gh_stars>1-10 import matplotlib.pyplot as graph subject = [\"Probability\", \"Calculas\", \"Discrete", "import matplotlib.pyplot as graph subject = [\"Probability\", \"Calculas\", \"Discrete Mathematics\",", "\"Adv Engineering Mathematics\", \"Linear Algebra\", \"Cryptography\"] weightage = [250,900,850,1200,290,345] seperator", "subject = [\"Probability\", \"Calculas\", \"Discrete Mathematics\", \"Adv Engineering Mathematics\", \"Linear", "as graph subject = [\"Probability\", \"Calculas\", \"Discrete Mathematics\", \"Adv Engineering", "[\"Probability\", \"Calculas\", \"Discrete Mathematics\", \"Adv Engineering Mathematics\", \"Linear Algebra\", \"Cryptography\"]", "\"Cryptography\"] weightage = [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title(\"Mathematics Topic Weightage\")", "\"Calculas\", \"Discrete Mathematics\", \"Adv Engineering Mathematics\", \"Linear Algebra\", \"Cryptography\"] weightage", "weightage = [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title(\"Mathematics Topic Weightage\") graph.pie(weightage,labels=subject,autopct=\"%0.1f%%\",", "[250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title(\"Mathematics Topic Weightage\") graph.pie(weightage,labels=subject,autopct=\"%0.1f%%\", explode=seperator) graph.show()", "Mathematics\", \"Linear Algebra\", \"Cryptography\"] weightage = [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05]", "= [250,900,850,1200,290,345] seperator = [0.05,0,0,0,0.05,0.05] graph.title(\"Mathematics Topic Weightage\") graph.pie(weightage,labels=subject,autopct=\"%0.1f%%\", explode=seperator)" ]
[ "degrees 0,1,...,10 train_err = [] validation_err = [] for k", "train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ],", "0.5, 500))]: train_err = [] validation_err = [] for lam", "round(mean_square_error(test_y.to_numpy(), y_pred), 2)) print(\"Validation = \", round(validation_err[best_k], 2)) def select_regularization_parameter(n_samples:", "train_y, \"type\": \"Train\"}) df_test = pd.DataFrame({\"x\": test_X.squeeze(), \"y\": test_y, \"type\":", "+ \\ gg.theme_bw() + gg.labs(y=\"Average training and validation errors\", title=title)", "mean_square_error from IMLearn.utils import split_train_test from IMLearn.model_selection import cross_validate from", "lr = LinearRegression() lr.fit(train_X.to_numpy(), train_y.to_numpy()) print(\"Linear Regression Loss = \",", "\"test\"}) x_stat = np.linspace(-1.4, 2, 100) df_stat = pd.DataFrame({\"x\": x_stat,", "Ridge and Lasso regressions for name, learner, ran in [(\"Ridge\",", "train_X.squeeze(), \"y\": train_y, \"type\": \"Train\"}) df_test = pd.DataFrame({\"x\": test_X.squeeze(), \"y\":", "1) * (x - 1) * (x - 2) X", "in ran: rg = learner(lam) train_score, validation_score = cross_validate(rg, train_X.to_numpy(),", "validation errors\", title=title) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 8 -", "select_polynomial_degree(n_samples: int = 100, noise: float = 5): \"\"\" Simulate", "title = f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})\"", "import cross_validate from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression from sklearn.linear_model", "\\ gg.geom_point() + \\ gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \\ gg.labs(y=\"Average", "3)) df_train = pd.DataFrame({\"x\": train_X.squeeze(), \"y\": train_y, \"type\": \"Train\"}) df_test", "parameter for Ridge and Lasso regressions for name, learner, ran", "(x + 1) * (x - 1) * (x -", "pd.Series(y), train_proportion=(2 / 3)) df_train = pd.DataFrame({\"x\": train_X.squeeze(), \"y\": train_y,", "k =\", best_k) print(\"Test = \", round(mean_square_error(test_y.to_numpy(), y_pred), 2)) print(\"Validation", "Loss = \", lr.loss(test_X.to_numpy(), test_y.to_numpy())) if __name__ == '__main__': np.random.seed(0)", "500))]: train_err = [] validation_err = [] for lam in", "\\ gg.labs(y=\"Average training and validation errors\", title=f\"{title} \\nWith Noise: {noise},", "~ N(0,{noise})\" p = gg.ggplot() + \\ gg.geom_point(df, gg.aes(\"x\", \"y\",", "= pd.DataFrame({\"k\": range(11), \"avg error\": train_err, \"type\": \"train error\"}) df2", "[] for lam in ran: rg = learner(lam) train_score, validation_score", "portions def f(x): return (x + 3) * (x +", "= \", lr.loss(test_X.to_numpy(), test_y.to_numpy())) if __name__ == '__main__': np.random.seed(0) select_polynomial_degree()", "= gg.ggplot(df, gg.aes(\"lambda\", \"avg error\", color=\"type\")) + \\ gg.geom_line() +", "1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for", "\"type\": \"Model\"}) df = pd.concat([df_test, df_train]) title = f\"f(x) =", "gg def select_polynomial_degree(n_samples: int = 100, noise: float = 5):", "portions X, y = datasets.load_diabetes(return_X_y=True, as_frame=True) train_X, train_y, test_X, test_y", "* (x + 1) * (x - 1) * (x", "train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({\"lambda\": ran, \"avg error\": train_err, \"type\":", "from sklearn.linear_model import Lasso from utils import * import plotnine", "f(X) + 
np.random.normal(0, noise, n_samples) train_X, train_y, test_X, test_y =", "= f\" Cross Validation for Polynomial Fitting Over Different Degrees", "y_pred = rg.predict(test_X.to_numpy()) print(f\"best lambda {name} = {round(ran[best_lam], 3)}\") print(f\"Test", "error\", color=\"type\")) + \\ gg.geom_point() + \\ gg.theme_bw() + gg.scale_x_continuous(breaks=range(11))", "* (x - 2) X = np.linspace(-1.2, 2, n_samples) y", "Lasso, np.linspace(0.001, 0.5, 500))]: train_err = [] validation_err = []", "= [] validation_err = [] for k in range(11): pf", "LinearRegression, RidgeRegression from sklearn.linear_model import Lasso from utils import *", "[] validation_err = [] for k in range(11): pf =", "gg.theme_bw() + \\ gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) #", "gg.aes(\"x\", \"y\")) + \\ gg.theme_bw() + \\ gg.ggtitle(title) # print(p)", "generate n_evaluations: int, default = 500 Number of regularization parameter", "Number of regularization parameter values to evaluate for each of", "k, fit a k-degree polynomial model and report test error", "IMLearn.model_selection import cross_validate from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression from", "+ \\ gg.labs(y=\"Average training and validation errors\", title=f\"{title} \\nWith Noise:", "for name, learner, ran in [(\"Ridge\", RidgeRegression, np.linspace(0.001, 0.05, 500)),", "(x + 3) * (x + 2) * (x +", "2) * (x + 1) * (x - 1) *", "best_k = np.argmin(np.array(validation_err)) pf = PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred =", "learner, ran in [(\"Ridge\", RidgeRegression, np.linspace(0.001, 0.05, 500)), (\"Lasso\", Lasso,", "= learner(lam) train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(), mean_square_error) train_err.append(train_score)", "= pd.DataFrame({\"x\": test_X.squeeze(), \"y\": test_y, \"type\": \"test\"}) x_stat = np.linspace(-1.4,", "gg.aes(\"lambda\", \"avg error\", color=\"type\")) + \\ gg.geom_line() + \\ gg.theme_bw()", "y[:50], X.iloc[50:, ], y[50:] # Question 7 - Perform CV", "\"train error\"}) df2 = pd.DataFrame({\"lambda\": ran, \"avg error\": validation_err, \"type\":", "annotations import numpy as np import pandas as pd from", "= pd.DataFrame({\"lambda\": ran, \"avg error\": validation_err, \"type\": \"validation error\"}) df", "rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = rg.predict(test_X.to_numpy()) print(f\"best lambda {name} = {round(ran[best_lam],", "error\": train_err, \"type\": \"train error\"}) df2 = pd.DataFrame({\"lambda\": ran, \"avg", "- Load diabetes dataset and split into training and testing", "\"\"\" # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2)", "= {round(ran[best_lam], 3)}\") print(f\"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}\")", "best fitting regularization parameter values for Ridge and Lasso regressions", "train_y.to_numpy(), mean_square_error) train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({\"lambda\": ran, \"avg error\":", "\\nWith Noise: {noise}, Num of samples: {n_samples}\") gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png',", "import mean_square_error from IMLearn.utils import split_train_test from IMLearn.model_selection import cross_validate", "import plotnine as gg def select_polynomial_degree(n_samples: int = 100, noise:", "rg = learner(ran[best_lam]) 
rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = rg.predict(test_X.to_numpy()) print(f\"best lambda", "utils import * import plotnine as gg def select_polynomial_degree(n_samples: int", "color=\"type\")) + \\ gg.geom_line(df_stat, gg.aes(\"x\", \"y\")) + \\ gg.theme_bw() +", "gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 2 - Perform CV for", "cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(), mean_square_error) train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({\"lambda\": ran,", "np.linspace(-1.4, 2, 100) df_stat = pd.DataFrame({\"x\": x_stat, \"y\": f(x_stat), \"type\":", "into training and testing portions X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)", "k in range(11): pf = PolynomialFitting(k) train_score, validation_score = cross_validate(pf,", "pf.predict(test_X.to_numpy()) print(\"best k =\", best_k) print(\"Test = \", round(mean_square_error(test_y.to_numpy(), y_pred),", "= \", round(mean_square_error(test_y.to_numpy(), y_pred), 2)) print(\"Validation = \", round(validation_err[best_k], 2))", "\"type\": \"validation error\"}) df = pd.concat([df1, df2]) title = f\"{name}", "df = pd.concat([df1, df2]) title = f\"{name} Regularization Cross Validate", "model and Least Squares model best_lam = np.argmin(np.array(validation_err)) rg =", "testing portions def f(x): return (x + 3) * (x", "{name} = {round(ran[best_lam], 3)}\") print(f\"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred),", "for each of the algorithms \"\"\" # Question 6 -", "{round(mean_square_error(test_y.to_numpy(), y_pred), 2)}\") lr = LinearRegression() lr.fit(train_X.to_numpy(), train_y.to_numpy()) print(\"Linear Regression", "as np import pandas as pd from sklearn import datasets", "p = gg.ggplot(df, gg.aes(\"k\", \"avg error\", color=\"type\")) + \\ gg.geom_point()", "data from a polynomial model and use cross-validation to select", "to select the best fitting regularization parameter values for Ridge", "Using best value of k, fit a k-degree polynomial model", "range(11), \"avg error\": validation_err, \"type\": \"validation error\"}) df = pd.concat([df1,", "gg.geom_point(df, gg.aes(\"x\", \"y\", color=\"type\")) + \\ gg.geom_line(df_stat, gg.aes(\"x\", \"y\")) +", "Validation for Polynomial Fitting Over Different Degrees k\" p =", "df_train]) title = f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~", "= np.argmin(np.array(validation_err)) pf = PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy())", "= pd.DataFrame({\"k\": range(11), \"avg error\": validation_err, \"type\": \"validation error\"}) df", "and split into training- and testing portions def f(x): return", "regressions Parameters ---------- n_samples: int, default=50 Number of samples to", "\"\"\" Using sklearn's diabetes dataset use cross-validation to select the", "n_samples) y = f(X) + np.random.normal(0, noise, n_samples) train_X, train_y,", "datasets from IMLearn.metrics import mean_square_error from IMLearn.utils import split_train_test from", "CV for polynomial fitting with degrees 0,1,...,10 train_err = []", "n_samples) train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 /", "float, default = 5 Noise level to simulate in responses", "gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 2 -", "and validation errors\", title=f\"{title} 
\\nWith Noise: {noise}, Num of samples:", "select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500): \"\"\" Using", "n_evaluations: int, default = 500 Number of regularization parameter values", "gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \\ gg.labs(y=\"Average training and validation errors\",", "from utils import * import plotnine as gg def select_polynomial_degree(n_samples:", "fitting with degrees 0,1,...,10 train_err = [] validation_err = []", "= gg.ggplot() + \\ gg.geom_point(df, gg.aes(\"x\", \"y\", color=\"type\")) + \\", "= [] validation_err = [] for lam in ran: rg", "= np.linspace(-1.2, 2, n_samples) y = f(X) + np.random.normal(0, noise,", "of k, fit a k-degree polynomial model and report test", "of samples to generate noise: float, default = 5 Noise", "use cross-validation to select the best fitting regularization parameter values", "errors\", title=title) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 8 - Compare", "x_stat = np.linspace(-1.4, 2, 100) df_stat = pd.DataFrame({\"x\": x_stat, \"y\":", "Lambda\" p = gg.ggplot(df, gg.aes(\"lambda\", \"avg error\", color=\"type\")) + \\", "import Lasso from utils import * import plotnine as gg", "and validation errors\", title=title) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question 8", "= pd.DataFrame({\"x\": train_X.squeeze(), \"y\": train_y, \"type\": \"Train\"}) df_test = pd.DataFrame({\"x\":", "print(f\"best lambda {name} = {round(ran[best_lam], 3)}\") print(f\"Test MSE {name} =", "\\ gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \\ gg.labs(y=\"Average training and validation", "plot=p, verbose=False) # Question 3 - Using best value of", "= [] for lam in ran: rg = learner(lam) train_score,", "Lasso regressions Parameters ---------- n_samples: int, default=50 Number of samples", "Lasso from utils import * import plotnine as gg def", "= (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})\" p = gg.ggplot()", "verbose=False) # Question 2 - Perform CV for polynomial fitting", "of the algorithms \"\"\" # Question 6 - Load diabetes", "from sklearn import datasets from IMLearn.metrics import mean_square_error from IMLearn.utils", "X, y = datasets.load_diabetes(return_X_y=True, as_frame=True) train_X, train_y, test_X, test_y =", "Question 7 - Perform CV for different values of the", "PolynomialFitting, LinearRegression, RidgeRegression from sklearn.linear_model import Lasso from utils import", "= gg.ggplot(df, gg.aes(\"k\", \"avg error\", color=\"type\")) + \\ gg.geom_point() +", "in responses \"\"\" # Question 1 - Generate dataset for", "X = np.linspace(-1.2, 2, n_samples) y = f(X) + np.random.normal(0,", "int, default=100 Number of samples to generate noise: float, default", "\"y\")) + \\ gg.theme_bw() + \\ gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png',", "+ \\ gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question", "model and report test error best_k = np.argmin(np.array(validation_err)) pf =", "noise, n_samples) train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2", "\"avg error\", color=\"type\")) + \\ gg.geom_point() + \\ gg.theme_bw() +", "the best fitting degree Parameters ---------- n_samples: int, default=100 Number", "each of the algorithms \"\"\" # Question 6 - Load", "train_proportion=(2 / 3)) df_train = pd.DataFrame({\"x\": train_X.squeeze(), 
\"y\": train_y, \"type\":", "train_y.to_numpy(), mean_square_error) train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({\"k\": range(11), \"avg error\":", "Validate Over Different Lambda\" p = gg.ggplot(df, gg.aes(\"lambda\", \"avg error\",", "{name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}\") lr = LinearRegression() lr.fit(train_X.to_numpy(), train_y.to_numpy())", "import numpy as np import pandas as pd from sklearn", "y = datasets.load_diabetes(return_X_y=True, as_frame=True) train_X, train_y, test_X, test_y = X.iloc[:50,", "pd.DataFrame({\"k\": range(11), \"avg error\": train_err, \"type\": \"train error\"}) df2 =", "np.argmin(np.array(validation_err)) rg = learner(ran[best_lam]) rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = rg.predict(test_X.to_numpy()) print(f\"best", "test_X.squeeze(), \"y\": test_y, \"type\": \"test\"}) x_stat = np.linspace(-1.4, 2, 100)", "int = 500): \"\"\" Using sklearn's diabetes dataset use cross-validation", "int, default=50 Number of samples to generate n_evaluations: int, default", "= {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}\") lr = LinearRegression() lr.fit(train_X.to_numpy(), train_y.to_numpy()) print(\"Linear", "df = pd.concat([df_test, df_train]) title = f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) +", "+ Gaussian noise ~ N(0,{noise})\" p = gg.ggplot() + \\", "= 500 Number of regularization parameter values to evaluate for", "], y[50:] # Question 7 - Perform CV for different", "model best_lam = np.argmin(np.array(validation_err)) rg = learner(ran[best_lam]) rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred", "---------- n_samples: int, default=100 Number of samples to generate noise:", "training and validation errors\", title=title) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False) # Question", "import * import plotnine as gg def select_polynomial_degree(n_samples: int =", "ran in [(\"Ridge\", RidgeRegression, np.linspace(0.001, 0.05, 500)), (\"Lasso\", Lasso, np.linspace(0.001,", "= f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})\" p", "= pd.DataFrame({\"x\": x_stat, \"y\": f(x_stat), \"type\": \"Model\"}) df = pd.concat([df_test,", "evaluate for each of the algorithms \"\"\" # Question 6", "Gaussian noise # and split into training- and testing portions", "3)}\") print(f\"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}\") lr =", "pandas as pd from sklearn import datasets from IMLearn.metrics import", "print(\"Test = \", round(mean_square_error(test_y.to_numpy(), y_pred), 2)) print(\"Validation = \", round(validation_err[best_k],", "int = 100, noise: float = 5): \"\"\" Simulate data", "fit a k-degree polynomial model and report test error best_k", "for Polynomial Fitting Over Different Degrees k\" p = gg.ggplot(df,", "gg.aes(\"k\", \"avg error\", color=\"type\")) + \\ gg.geom_point() + \\ gg.theme_bw()", "{n_samples}\") gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False) # Question 3 -", "from IMLearn.utils import split_train_test from IMLearn.model_selection import cross_validate from IMLearn.learners.regressors", "Ridge model, best Lasso model and Least Squares model best_lam", "Noise level to simulate in responses \"\"\" # Question 1", "learner(ran[best_lam]) rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = rg.predict(test_X.to_numpy()) print(f\"best lambda {name} =", "model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise # and", "\"y\", 
color=\"type\")) + \\ gg.geom_line(df_stat, gg.aes(\"x\", \"y\")) + \\ gg.theme_bw()", "dataset and split into training and testing portions X, y", "default=50 Number of samples to generate n_evaluations: int, default =", "train_err, \"type\": \"train error\"}) df2 = pd.DataFrame({\"k\": range(11), \"avg error\":", "and report test error best_k = np.argmin(np.array(validation_err)) pf = PolynomialFitting(int(best_k))", "polynomial model and report test error best_k = np.argmin(np.array(validation_err)) pf", "# Question 7 - Perform CV for different values of", "Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps", "Perform CV for different values of the regularization parameter for", "error\"}) df = pd.concat([df1, df2]) title = f\" Cross Validation", "np.linspace(0.001, 0.05, 500)), (\"Lasso\", Lasso, np.linspace(0.001, 0.5, 500))]: train_err =", "error\"}) df2 = pd.DataFrame({\"k\": range(11), \"avg error\": validation_err, \"type\": \"validation", "pd.DataFrame({\"lambda\": ran, \"avg error\": validation_err, \"type\": \"validation error\"}) df =", "title = f\"{name} Regularization Cross Validate Over Different Lambda\" p", "validation_err, \"type\": \"validation error\"}) df = pd.concat([df1, df2]) title =", "pd.DataFrame({\"k\": range(11), \"avg error\": validation_err, \"type\": \"validation error\"}) df =", "error best_k = np.argmin(np.array(validation_err)) pf = PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred", "values for Ridge and Lasso regressions Parameters ---------- n_samples: int,", "round(validation_err[best_k], 2)) def select_regularization_parameter(n_samples: int = 50, n_evaluations: int =", "Cross Validation for Polynomial Fitting Over Different Degrees k\" p", "sklearn's diabetes dataset use cross-validation to select the best fitting", "train_err.append(train_score) validation_err.append(validation_score) df1 = pd.DataFrame({\"k\": range(11), \"avg error\": train_err, \"type\":", "Parameters ---------- n_samples: int, default=50 Number of samples to generate", "gg.theme_bw() + gg.labs(y=\"Average training and validation errors\", title=title) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p,", "fitting regularization parameter values for Ridge and Lasso regressions Parameters", "p = gg.ggplot(df, gg.aes(\"lambda\", \"avg error\", color=\"type\")) + \\ gg.geom_line()", "+ \\ gg.theme_bw() + \\ gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p,", "in [(\"Ridge\", RidgeRegression, np.linspace(0.001, 0.05, 500)), (\"Lasso\", Lasso, np.linspace(0.001, 0.5,", "- Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps", "pd.DataFrame({\"x\": train_X.squeeze(), \"y\": train_y, \"type\": \"Train\"}) df_test = pd.DataFrame({\"x\": test_X.squeeze(),", "for Ridge and Lasso regressions for name, learner, ran in", "def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500): \"\"\"", "pf = PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy()) print(\"best k", "test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3)) df_train =", "n_samples: int, default=100 Number of samples to generate noise: float,", "plot=p, verbose=False) # Question 8 - Compare best Ridge model,", "cross_validate from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression from sklearn.linear_model import", "Load diabetes dataset and 
split into training and testing portions", "(x - 2) X = np.linspace(-1.2, 2, n_samples) y =", "Using sklearn's diabetes dataset use cross-validation to select the best", "= pf.predict(test_X.to_numpy()) print(\"best k =\", best_k) print(\"Test = \", round(mean_square_error(test_y.to_numpy(),", "from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression from sklearn.linear_model import Lasso", "to generate noise: float, default = 5 Noise level to", "= LinearRegression() lr.fit(train_X.to_numpy(), train_y.to_numpy()) print(\"Linear Regression Loss = \", lr.loss(test_X.to_numpy(),", "2) X = np.linspace(-1.2, 2, n_samples) y = f(X) +", "degree Parameters ---------- n_samples: int, default=100 Number of samples to", "\"Model\"}) df = pd.concat([df_test, df_train]) title = f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2)", "# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) +", "f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})\" p =", "\"validation error\"}) df = pd.concat([df1, df2]) title = f\" Cross", "train_y.to_numpy()) print(\"Linear Regression Loss = \", lr.loss(test_X.to_numpy(), test_y.to_numpy())) if __name__", "gg.ggplot() + \\ gg.geom_point(df, gg.aes(\"x\", \"y\", color=\"type\")) + \\ gg.geom_line(df_stat,", "n_evaluations: int = 500): \"\"\" Using sklearn's diabetes dataset use", "{noise}, Num of samples: {n_samples}\") gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False)", "y[50:] # Question 7 - Perform CV for different values", "+ \\ gg.geom_line() + \\ gg.theme_bw() + gg.labs(y=\"Average training and", "regressions for name, learner, ran in [(\"Ridge\", RidgeRegression, np.linspace(0.001, 0.05,", "np.linspace(0.001, 0.5, 500))]: train_err = [] validation_err = [] for", "np.argmin(np.array(validation_err)) pf = PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy()) print(\"best", "pd.DataFrame({\"lambda\": ran, \"avg error\": train_err, \"type\": \"train error\"}) df2 =", "train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy()) print(\"best k =\", best_k) print(\"Test =", "from __future__ import annotations import numpy as np import pandas", "df2]) title = f\" Cross Validation for Polynomial Fitting Over", "= pd.concat([df1, df2]) title = f\"{name} Regularization Cross Validate Over", "Ridge and Lasso regressions Parameters ---------- n_samples: int, default=50 Number", "\"avg error\": train_err, \"type\": \"train error\"}) df2 = pd.DataFrame({\"k\": range(11),", "df1 = pd.DataFrame({\"k\": range(11), \"avg error\": train_err, \"type\": \"train error\"})", "lambda {name} = {round(ran[best_lam], 3)}\") print(f\"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(),", "N(0,{noise})\" p = gg.ggplot() + \\ gg.geom_point(df, gg.aes(\"x\", \"y\", color=\"type\"))", "= 5): \"\"\" Simulate data from a polynomial model and", "test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:] #", "the algorithms \"\"\" # Question 6 - Load diabetes dataset", "test_y, \"type\": \"test\"}) x_stat = np.linspace(-1.4, 2, 100) df_stat =", "dataset use cross-validation to select the best fitting regularization parameter", "(x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})\" p = gg.ggplot() +", "pd.DataFrame({\"x\": test_X.squeeze(), \"y\": test_y, \"type\": \"test\"}) x_stat = np.linspace(-1.4, 2,", "training and testing portions X, y = datasets.load_diabetes(return_X_y=True, as_frame=True) train_X,", 
"gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False) # Question 3 - Using", "polynomial fitting with degrees 0,1,...,10 train_err = [] validation_err =", "= pd.concat([df_test, df_train]) title = f\"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian", "PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy()) print(\"best k =\", best_k)", "= split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3)) df_train = pd.DataFrame({\"x\": train_X.squeeze(),", "error\"}) df2 = pd.DataFrame({\"lambda\": ran, \"avg error\": validation_err, \"type\": \"validation", "as gg def select_polynomial_degree(n_samples: int = 100, noise: float =", "\", round(mean_square_error(test_y.to_numpy(), y_pred), 2)) print(\"Validation = \", round(validation_err[best_k], 2)) def", "\"type\": \"test\"}) x_stat = np.linspace(-1.4, 2, 100) df_stat = pd.DataFrame({\"x\":", "= datasets.load_diabetes(return_X_y=True, as_frame=True) train_X, train_y, test_X, test_y = X.iloc[:50, :],", "= pd.DataFrame({\"lambda\": ran, \"avg error\": train_err, \"type\": \"train error\"}) df2", "df_stat = pd.DataFrame({\"x\": x_stat, \"y\": f(x_stat), \"type\": \"Model\"}) df =", "\"y\": test_y, \"type\": \"test\"}) x_stat = np.linspace(-1.4, 2, 100) df_stat", "\"train error\"}) df2 = pd.DataFrame({\"k\": range(11), \"avg error\": validation_err, \"type\":", "different values of the regularization parameter for Ridge and Lasso", "values of the regularization parameter for Ridge and Lasso regressions", "\", lr.loss(test_X.to_numpy(), test_y.to_numpy())) if __name__ == '__main__': np.random.seed(0) select_polynomial_degree() select_polynomial_degree(noise=0)", "Question 2 - Perform CV for polynomial fitting with degrees", "500): \"\"\" Using sklearn's diabetes dataset use cross-validation to select", "to simulate in responses \"\"\" # Question 1 - Generate", ":], y[:50], X.iloc[50:, ], y[50:] # Question 7 - Perform", "rg.predict(test_X.to_numpy()) print(f\"best lambda {name} = {round(ran[best_lam], 3)}\") print(f\"Test MSE {name}", "\"\"\" # Question 6 - Load diabetes dataset and split", "\\ gg.theme_bw() + \\ gg.ggtitle(title) # print(p) gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)", "(x - 1) * (x - 2) X = np.linspace(-1.2,", "= learner(ran[best_lam]) rg.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = rg.predict(test_X.to_numpy()) print(f\"best lambda {name}", "fitting degree Parameters ---------- n_samples: int, default=100 Number of samples", "= rg.predict(test_X.to_numpy()) print(f\"best lambda {name} = {round(ran[best_lam], 3)}\") print(f\"Test MSE", "= 100, noise: float = 5): \"\"\" Simulate data from", "validation_err = [] for lam in ran: rg = learner(lam)", "f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise # and split", "gg.geom_line() + \\ gg.theme_bw() + gg.labs(y=\"Average training and validation errors\",", "sklearn import datasets from IMLearn.metrics import mean_square_error from IMLearn.utils import", "= PolynomialFitting(int(best_k)) pf.fit(train_X.to_numpy(), train_y.to_numpy()) y_pred = pf.predict(test_X.to_numpy()) print(\"best k =\",", "CV for different values of the regularization parameter for Ridge", "train_err, \"type\": \"train error\"}) df2 = pd.DataFrame({\"lambda\": ran, \"avg error\":", "and use cross-validation to select the best fitting degree Parameters", "regularization parameter for Ridge and Lasso regressions for name, learner,", "1) * (x - 2) X = 
# --- cross-validation exercise: polynomial degree and regularization parameter selection ---

from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.linear_model import Lasso

from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from utils import *
import plotnine as gg


def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
    """
    Simulate data from a polynomial model and use cross-validation to select the best fitting degree

    Parameters
    ----------
    n_samples: int, default=100
        Number of samples to generate

    noise: float, default=5
        Noise level to simulate in responses
    """
    # Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
    # and split into training- and testing portions
    def f(x):
        return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)

    X = np.linspace(-1.2, 2, n_samples)
    y = f(X) + np.random.normal(0, noise, n_samples)
    train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3))

    df_train = pd.DataFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
    df_test = pd.DataFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
    x_stat = np.linspace(-1.4, 2, 100)
    df_stat = pd.DataFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
    df = pd.concat([df_test, df_train])

    title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
    p = gg.ggplot() + \
        gg.geom_point(df, gg.aes("x", "y", color="type")) + \
        gg.geom_line(df_stat, gg.aes("x", "y")) + \
        gg.theme_bw() + \
        gg.ggtitle(title)
    gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)

    # Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
    train_err = []
    validation_err = []
    for k in range(11):
        pf = PolynomialFitting(k)
        train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), mean_square_error)
        train_err.append(train_score)
        validation_err.append(validation_score)

    df1 = pd.DataFrame({"k": range(11), "avg error": train_err, "type": "train error"})
    df2 = pd.DataFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
    df = pd.concat([df1, df2])
    title = "Cross Validation for Polynomial Fitting Over Different Degrees k"
    p = gg.ggplot(df, gg.aes("k", "avg error", color="type")) + \
        gg.geom_point() + \
        gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
        gg.labs(y="Average training and validation errors",
                title=f"{title} \nWith Noise: {noise}, Num of samples: {n_samples}")
    gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False)

    # Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
    best_k = np.argmin(np.array(validation_err))
    pf = PolynomialFitting(int(best_k))
    pf.fit(train_X.to_numpy(), train_y.to_numpy())
    y_pred = pf.predict(test_X.to_numpy())
    print("best k =", best_k)
    print("Test = ", round(mean_square_error(test_y.to_numpy(), y_pred), 2))
    print("Validation = ", round(validation_err[best_k], 2))


def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
    """
    Using sklearn's diabetes dataset, use cross-validation to select the best fitting regularization parameter
    values for Ridge and Lasso regressions

    Parameters
    ----------
    n_samples: int, default=50
        Number of samples to generate

    n_evaluations: int, default=500
        Number of regularization parameter values to evaluate for each of the algorithms
    """
    # Question 6 - Load diabetes dataset and split into training and testing portions
    X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
    train_X, train_y, test_X, test_y = X.iloc[:n_samples, :], y[:n_samples], X.iloc[n_samples:, :], y[n_samples:]

    # Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
    for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, n_evaluations)),
                               ("Lasso", Lasso, np.linspace(0.001, 0.5, n_evaluations))]:
        train_err = []
        validation_err = []
        for lam in ran:
            rg = learner(lam)
            train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
                                                           mean_square_error)
            train_err.append(train_score)
            validation_err.append(validation_score)

        df1 = pd.DataFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
        df2 = pd.DataFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
        df = pd.concat([df1, df2])
        title = f"{name} Regularization Cross Validate Over Different Lambda"
        p = gg.ggplot(df, gg.aes("lambda", "avg error", color="type")) + \
            gg.geom_line() + \
            gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
        gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)

        # Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
        best_lam = np.argmin(np.array(validation_err))
        rg = learner(ran[best_lam])
        rg.fit(train_X.to_numpy(), train_y.to_numpy())
        y_pred = rg.predict(test_X.to_numpy())
        print(f"best lambda {name} = {round(ran[best_lam], 3)}")
        print(f"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}")

    lr = LinearRegression()
    lr.fit(train_X.to_numpy(), train_y.to_numpy())
    print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))


if __name__ == '__main__':
    np.random.seed(0)
    select_polynomial_degree()
    select_polynomial_degree(noise=0)
    select_polynomial_degree(n_samples=1500, noise=10)
    select_regularization_parameter()
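The exercise above leans on IMLearn.model_selection.cross_validate, which belongs to the course's own library rather than a published package. For readers without IMLearn, the following is a minimal sketch of what that helper is assumed to do, based only on how it is called here — cross_validate(estimator, X, y, scoring) returning an (average train score, average validation score) pair. The five-fold split, the name cross_validate_sketch, and the use of np.array_split are illustrative assumptions, not the reference implementation.

# Minimal k-fold cross-validation sketch; assumed behaviour only, not the IMLearn reference implementation.
import numpy as np


def cross_validate_sketch(estimator, X, y, scoring, cv: int = 5):
    """Return (mean train score, mean validation score) of `estimator` over `cv` folds.

    `estimator` is assumed to expose fit(X, y) and predict(X);
    `scoring` is a callable (y_true, y_pred) -> float, e.g. mean_square_error.
    """
    X_folds = np.array_split(X, cv)
    y_folds = np.array_split(y, cv)
    train_scores, validation_scores = [], []
    for i in range(cv):
        # train on every fold except the i-th, validate on the i-th
        X_train = np.concatenate([X_folds[j] for j in range(cv) if j != i])
        y_train = np.concatenate([y_folds[j] for j in range(cv) if j != i])
        estimator.fit(X_train, y_train)
        train_scores.append(scoring(y_train, estimator.predict(X_train)))
        validation_scores.append(scoring(y_folds[i], estimator.predict(X_folds[i])))
    return float(np.mean(train_scores)), float(np.mean(validation_scores))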
# --- media parsing helpers: convert media definitions into catalog formats ---

import re
import copy


def parse_media(media, content_version, project_chapters):
    """
    Converts a media object into formats usable in the catalog
    :param media: the media object
    :type media: dict
    :param content_version: the current version of the source content
    :type content_version: string
    :param project_chapters: a dictionary of project chapters
    :type project_chapters: dict
    :return: resource_formats, project_formats a list of resource formats and dictionary of project formats
    """
    resource_formats = []
    project_formats = {}
    if 'resource' in media:
        resource_formats = _parse_resource(media['resource'], content_version)
    if 'projects' in media:
        for project in media['projects']:
            project_id = project['identifier']
            chapters = []
            if project_id == 'obs':
                # TRICKY: obs projects always have 50 chapters
                # This allows empty projects to still publish media.
                for x in range(1, 51):  # chapters 1..50
                    chapters.append(str(x).zfill(2))
            if project_id in project_chapters:
                chapters = project_chapters[project_id]
            project_formats[project_id] = _parse_project(project, content_version, chapters)

    return resource_formats, project_formats


def _parse_resource(resource, content_version):
    """
    Converts a resource media object into formats usable in the catalog
    :param resource: the media object
    :type resource: dict
    :param content_version: the current version of the source content
    :type content_version: string
    :return: a list of formats
    """
    source_version = _expand_keys(resource['version'], {'latest': content_version})
    formats = []
    if 'media' in resource:
        for media in resource['media']:
            media_version = _expand_keys(media['version'], {'latest': content_version})
            expansion_vars = _make_expansion_variables(media, content_version)
            if 'quality' in media and len(media['quality']) > 0:
                # build format for each quality
                for quality in media['quality']:
                    expansion_vars['quality'] = quality
                    format = _make_format(source_version=source_version, media_version=media_version,
                                          quality=quality, media=media, expansion_vars=expansion_vars)
                    formats.append(format)
            else:
                # build a single format
                format = _make_format(source_version=source_version, media_version=media_version,
                                      quality=None, media=media, expansion_vars=expansion_vars)
                formats.append(format)
    return formats


def _parse_project(project, content_version, chapters_ids):
    """
    Converts a project media object into formats usable in the catalog
    :param project: the media object
    :type project: dict
    :param content_version: the current version of the source content
    :type content_version: string
    :param chapters_ids: a list of chapter identifiers in the project
    :type chapters_ids: list
    :return: a list of formats
    """
    source_version = _expand_keys(project['version'], {'latest': content_version})
    formats = []
    if 'media' in project:
        for media in project['media']:
            media_version = _expand_keys(media['version'], {'latest': content_version})
            expansion_vars = _make_expansion_variables(media, content_version)
            if 'quality' in media and len(media['quality']) > 0:
                # build format for each quality
                for quality in media['quality']:
                    expansion_vars['quality'] = quality
                    format = _make_format(source_version=source_version, media_version=media_version,
                                          quality=quality, media=media, expansion_vars=expansion_vars)
                    chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
                    if chapters:
                        format['chapters'] = chapters
                    formats.append(format)
            else:
                # build single format
                format = _make_format(source_version=source_version, media_version=media_version,
                                      quality=None, media=media, expansion_vars=expansion_vars)
                chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
                if chapters:
                    format['chapters'] = chapters
                formats.append(format)
    return formats


def _prepare_chapter_formats(media, chapters, expansion_vars):
    """
    This is a wrapper around the method `_parse_project_chapter`.
    Since we routinely conditionally prepare chapters in multiple places this handles it in one place
    :param media: the media object to inspect
    :param chapters: a list of chapter ids
    :param expansion_vars: a dictionary of variables that may be expanded in the chapter url
    :return:
    """
    if 'chapter_url' in media:
        chapter_url = _expand_keys(media['chapter_url'], expansion_vars)
        chapters = _parse_project_chapter(chapter_url, chapters)
        if chapters:
            return chapters
    return None


def _parse_project_chapter(chapter_url, chapters):
    """
    Generates chapter formats for use in the catalog
    :param chapter_url: the url template that will be used in the formats
    :param chapters: a list of chapter ids
    :type chapters: list
    :return:
    """
    # TODO: this requires that we give a well formatted list of chapter ids and check if the Rc is a book
    # only book RCs can have chapter formats
    formats = []
    for chapter_id in chapters:
        format = {
            'size': 0,
            'length': 0,
            'modified': '',
            'identifier': chapter_id,
            'url': _expand_keys(chapter_url, {'chapter': chapter_id}),
            'signature': '',
            'build_rules': [
                'signing.sign_given_url'
            ]
        }
        formats.append(format)
    return formats


def _make_format(source_version, media_version, quality, media, expansion_vars):
    format = {
        'format': '',
        'modified': '',
        'size': 0,
        'source_version': '{}'.format(source_version),
        'version': '{}'.format(media_version),
        'contributor': media['contributor'],
        'url': _expand_keys(media['url'], expansion_vars),
        'signature': '',
        'build_rules': [
            'signing.sign_given_url'
        ]
    }
    if quality:
        format['quality'] = quality
    return format


def _make_expansion_variables(media_block, content_version):
    """
    Creates a dictionary of expansion variables for media items.
    :param media_block:
    :param content_version:
    :return:
    """
    vars = copy.copy(media_block)

    # strip black listed keys
    black_list = ['url', 'chapter_url']
    for key in black_list:
        if key in vars:
            del vars[key]

    # TRICKY: using `latest` as an expansion variable in urls is not explicitly stated in the spec,
    # but it's a common misunderstanding so we allow it.
    vars['latest'] = '{}'.format(content_version)

    return vars


def _expand_keys(target, replacements):
    """
    Replaces all the dict keys found in the string with the dict values.
    Keys in the string must be delimited by brackets {}
    :param target:
    :param replacements:
    :return:
    """
    if isinstance(target, basestring) or isinstance(target, str):
        result = target
        if not isinstance(replacements, dict):
            raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements)))
        for key in replacements:
            if not isinstance(replacements[key], list):
                result = re.sub(r'{\s*' + key + r'\s*}', '{}'.format(replacements[key]), result)
        return result
    elif isinstance(target, int):
        return target
    else:
        raise Exception('Invalid replacement target "{}". Expected string but received {}'.format(target, type(target)))
chapters = [] if", "for chapter_id in chapters: format = { 'size': 0, 'length':", "project['identifier'] chapters = [] if project_id == 'obs': # TRICKY:", "use in the catalog :param chapter_url: the url template that", "not explicitly stated in the spec, # but it's a", "all the dict keys found in the string with the", "isinstance(replacements, dict): raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements)))", "a list of chapter identifiers in the project :type chapters_ids:", "object into formats usable in the catalog :param media: the", "'projects' in media: for project in media['projects']: project_id = project['identifier']", "del vars[key] # TRICKY: using `latest` as an expansion variable", "if 'chapter_url' in media: chapter_url = _expand_keys(media['chapter_url'], expansion_vars) chapters =", "= _expand_keys(resource['version'], {'latest': content_version}) formats = [] if 'media' in", "_parse_project(project, content_version, chapters_ids): \"\"\" Converts a project media object into", "the method `_parse_project_chapter`. Since we routinely conditionally prepare chapters in", "chapter_url = _expand_keys(media['chapter_url'], expansion_vars) chapters = _parse_project_chapter(chapter_url, chapters) if chapters:", ":type content_version: string :param project_chapters: a dictionary of project chapters", "is a wrapper around the method `_parse_project_chapter`. Since we routinely", "requires that we give a well formatted list of chapter", "formats = [] if 'media' in project: for media in", ":return: \"\"\" if 'chapter_url' in media: chapter_url = _expand_keys(media['chapter_url'], expansion_vars)", "media: dict :param content_version: the current version of the source", "of the source content :type content_version: string :param chapters_ids: a", "\"\"\" # TODO: this requires that we give a well", "formats usable in the catalog :param project: the media object", "formats.append(format) else: # build single format format = _make_format(source_version=source_version, media_version=media_version,", "content_version: string :return: a list of formats \"\"\" source_version =", "project_chapters): \"\"\" Converts a media object into formats usable in", "quality=None, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters:", "copy.copy(media_block) # strip black listed keys black_list = ['url', 'chapter_url']", "expansion_vars) if chapters: format['chapters'] = chapters formats.append(format) else: # build", "formats formats = [] for chapter_id in chapters: format =", "formats def _make_format(source_version, media_version, quality, media, expansion_vars): format = {", "expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters: format['chapters'] =", "_parse_project_chapter(chapter_url, chapters) if chapters: return chapters return None def _parse_project_chapter(chapter_url,", "return formats def _make_format(source_version, media_version, quality, media, expansion_vars): format =", ":type content_version: string :return: a list of formats \"\"\" source_version", "= _make_expansion_variables(media, content_version) if 'quality' in media and len(media['quality']) >", ":param project: the media object :type project: dict :param content_version:", "will be used in the formats :param chapters: a list", "only book RCs can have chapter formats formats = []", "with the dict values. 
Keys in the string must be", "Keys in the string must be delimited by brackets {}", "Converts a project media object into formats usable in the", "formats.append(format) else: # build a single format format = _make_format(source_version=source_version,", "# only book RCs can have chapter formats formats =", "None def _parse_project_chapter(chapter_url, chapters): \"\"\" Generates chapter formats for use", "the string must be delimited by brackets {} :param target:", "current version of the source content :type content_version: string :return:", "handles it in one place :param media: the media object", "content_version: the current version of the source content :type content_version:", "urls is not explicitly stated in the spec, # but", "return vars def _expand_keys(target, replacements): \"\"\" Replaces all the dict", "this handles it in one place :param media: the media", "media['projects']: project_id = project['identifier'] chapters = [] if project_id ==", "'quality' in media and len(media['quality']) > 0: # build format", "formats = [] for chapter_id in chapters: format = {", "result) return result elif isinstance(target, int): return target else: raise", "format['quality'] = quality return format def _parse_project(project, content_version, chapters_ids): \"\"\"", "if project_id == 'obs': # TRICKY: obs projects always have", "a project media object into formats usable in the catalog", "must be delimited by brackets {} :param target: :param replacements:", "chapters: format['chapters'] = chapters formats.append(format) else: # build single format", "_make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)", "values. Keys in the string must be delimited by brackets", "media object to inspect :param chapters: a list of chapter", "in media: for project in media['projects']: project_id = project['identifier'] chapters", "in multiple places this handles it in one place :param", "project: dict :param content_version: the current version of the source", "chapter identifiers in the project :type chapters_ids: list :return: a", "for key in black_list: if key in vars: del vars[key]", "\"\"\" This is a wrapper around the method `_parse_project_chapter`. 
Since", "chapters in multiple places this handles it in one place", "chapters = project_chapters[project_id] project_formats[project_id] = _parse_project(project, content_version, chapters) return resource_formats,", "catalog :param resource: the media object :type resource: dict :param", "format = _make_format(source_version=source_version, media_version=media_version, quality=None, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media,", "content_version, chapters) return resource_formats, project_formats def _parse_resource(resource, content_version): \"\"\" Converts", "[] project_formats = {} if 'resource' in media: resource_formats =", "expansion_vars=expansion_vars) formats.append(format) return formats def _make_format(source_version, media_version, quality, media, expansion_vars):", "'resource' in media: resource_formats = _parse_resource(media['resource'], content_version) if 'projects' in", "for each quality for quality in media['quality']: expansion_vars['quality'] = quality", "content_version: string :param chapters_ids: a list of chapter identifiers in", ":param resource: the media object :type resource: dict :param content_version:", "is not explicitly stated in the spec, # but it's", "in media: resource_formats = _parse_resource(media['resource'], content_version) if 'projects' in media:", "result elif isinstance(target, int): return target else: raise Exception('Invalid replacement", "replacements: :return: \"\"\" if isinstance(target, basestring) or isinstance(target, str): result", "formats for use in the catalog :param chapter_url: the url", "in the formats :param chapters: a list of chapter ids", "chapters :type project_chapters: dict :return: resource_formats, project_formats a list of", "allows empty projects to still publish media. for x in", "formats usable in the catalog :param media: the media object", "0, 'source_version': '{}'.format(source_version), 'version': '{}'.format(media_version), 'contributor': media['contributor'], 'url': _expand_keys(media['url'], expansion_vars),", "media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters: format['chapters']", "chapters_ids, expansion_vars) if chapters: format['chapters'] = chapters formats.append(format) else: #", "chapter_id}), 'signature': '', 'build_rules': [ 'signing.sign_given_url' ] } formats.append(format) return", "_make_expansion_variables(media_block, content_version): \"\"\" Creates a dictionary of expansion variables for", "+ '\\s*}', '{}'.format(replacements[key]), result) return result elif isinstance(target, int): return", "int): return target else: raise Exception('Invalid replacement target \"{}\". Expected", "version of the source content :type content_version: string :param chapters_ids:", "_prepare_chapter_formats(media, chapters, expansion_vars): \"\"\" This is a wrapper around the", ":type content_version: string :param chapters_ids: a list of chapter identifiers", "parse_media(media, content_version, project_chapters): \"\"\" Converts a media object into formats", "allow it. 
vars['latest'] = '{}'.format(content_version) return vars def _expand_keys(target, replacements):", "vars[key] # TRICKY: using `latest` as an expansion variable in", "chapters: return chapters return None def _parse_project_chapter(chapter_url, chapters): \"\"\" Generates", "= quality format = _make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) formats.append(format)", "'identifier': chapter_id, 'url': _expand_keys(chapter_url, {'chapter': chapter_id}), 'signature': '', 'build_rules': [", "\"\"\" Converts a media object into formats usable in the", "formats = [] if 'media' in resource: for media in", "= [] for chapter_id in chapters: format = { 'size':", "list of resource formats and dictionary of project formats \"\"\"", "format['chapters'] = chapters formats.append(format) return formats def _prepare_chapter_formats(media, chapters, expansion_vars):", "identifiers in the project :type chapters_ids: list :return: a list", "import re import copy def parse_media(media, content_version, project_chapters): \"\"\" Converts", "51): # chapters 1..50 chapters.append(str(x).zfill(2)) if project_id in project_chapters: chapters", "template that will be used in the formats :param chapters:", "target if not isinstance(replacements, dict): raise Exception('Expected dictionary of replacements", "media['contributor'], 'url': _expand_keys(media['url'], expansion_vars), 'signature': '', 'build_rules': [ 'signing.sign_given_url' ]", "project: for media in project['media']: media_version = _expand_keys(media['version'], {'latest': content_version})", "return resource_formats, project_formats def _parse_resource(resource, content_version): \"\"\" Converts a resource", "\"\"\" Converts a resource media object into formats usable in", "media items. :param self: :param media_block: :param content_version: :return: \"\"\"", "_make_expansion_variables(media, content_version) if 'quality' in media and len(media['quality']) > 0:", "content :type content_version: string :return: a list of formats \"\"\"", "chapter_id, 'url': _expand_keys(chapter_url, {'chapter': chapter_id}), 'signature': '', 'build_rules': [ 'signing.sign_given_url'", "chapters: format['chapters'] = chapters formats.append(format) return formats def _prepare_chapter_formats(media, chapters,", "not isinstance(replacements, dict): raise Exception('Expected dictionary of replacements but received", "formats :param chapters: a list of chapter ids :type chapters:", "if 'quality' in media and len(media['quality']) > 0: # build", "we allow it. vars['latest'] = '{}'.format(content_version) return vars def _expand_keys(target,", "listed keys black_list = ['url', 'chapter_url'] for key in black_list:", "{} :param target: :param replacements: :return: \"\"\" if isinstance(target, basestring)", "{'latest': content_version}) formats = [] if 'media' in project: for", "raise Exception('Invalid replacement target \"{}\". Expected string but received {}'.format(target,", "_parse_resource(media['resource'], content_version) if 'projects' in media: for project in media['projects']:", "project: the media object :type project: dict :param content_version: the", ":param media: the media object :type media: dict :param content_version:", "_make_format(source_version, media_version, quality, media, expansion_vars): format = { 'format': '',", "found in the string with the dict values. 
Keys in", "quality format = _make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) formats.append(format) else:", "def _make_format(source_version, media_version, quality, media, expansion_vars): format = { 'format':", "key in replacements: if not isinstance(replacements[key], list): result = re.sub(r'{\\s*'", "if chapters: format['chapters'] = chapters formats.append(format) return formats def _prepare_chapter_formats(media,", "format = _make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media,", "media, expansion_vars): format = { 'format': '', 'modified': '', 'size':", "object to inspect :param chapters: a list of chapter ids", "format format = _make_format(source_version=source_version, media_version=media_version, quality=None, media=media, expansion_vars=expansion_vars) formats.append(format) return", "content_version, chapters_ids): \"\"\" Converts a project media object into formats", "= _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters: format['chapters'] = chapters formats.append(format)", "'', 'build_rules': [ 'signing.sign_given_url' ] } if quality: format['quality'] =", "a list of formats \"\"\" source_version = _expand_keys(resource['version'], {'latest': content_version})", "project_id == 'obs': # TRICKY: obs projects always have 50", "Since we routinely conditionally prepare chapters in multiple places this", "This is a wrapper around the method `_parse_project_chapter`. Since we", ":type project_chapters: dict :return: resource_formats, project_formats a list of resource", "0: # build format for each quality for quality in", "isinstance(target, basestring) or isinstance(target, str): result = target if not", "source content :type content_version: string :param project_chapters: a dictionary of", "format = { 'format': '', 'modified': '', 'size': 0, 'source_version':", "this requires that we give a well formatted list of", "in the catalog :param chapter_url: the url template that will", "of project chapters :type project_chapters: dict :return: resource_formats, project_formats a", "source content :type content_version: string :return: a list of formats", "content_version}) expansion_vars = _make_expansion_variables(media, content_version) if 'quality' in media and", "dict): raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements))) for", "{ 'format': '', 'modified': '', 'size': 0, 'source_version': '{}'.format(source_version), 'version':", "resource media object into formats usable in the catalog :param", "the formats :param chapters: a list of chapter ids :type", "be expanded in the chapter url :return: \"\"\" if 'chapter_url'", "that may be expanded in the chapter url :return: \"\"\"", "'{}'.format(media_version), 'contributor': media['contributor'], 'url': _expand_keys(media['url'], expansion_vars), 'signature': '', 'build_rules': [", "be delimited by brackets {} :param target: :param replacements: :return:", ":param chapters: a list of chapter ids :param expansion_vars: a", "a list of chapter ids :param expansion_vars: a dictionary of", "media object :type media: dict :param content_version: the current version", ":return: a list of formats \"\"\" source_version = _expand_keys(project['version'], {'latest':", "that we give a well formatted list of chapter ids", "{} if 'resource' in media: resource_formats = 
_parse_resource(media['resource'], content_version) if", ":param content_version: the current version of the source content :type", "usable in the catalog :param media: the media object :type", "= { 'format': '', 'modified': '', 'size': 0, 'source_version': '{}'.format(source_version),", "TRICKY: using `latest` as an expansion variable in urls is", "formats.append(format) return formats def _make_format(source_version, media_version, quality, media, expansion_vars): format", "Creates a dictionary of expansion variables for media items. :param", "media: for project in media['projects']: project_id = project['identifier'] chapters =", "= chapters formats.append(format) return formats def _prepare_chapter_formats(media, chapters, expansion_vars): \"\"\"", "result = re.sub(r'{\\s*' + key + '\\s*}', '{}'.format(replacements[key]), result) return", "= project_chapters[project_id] project_formats[project_id] = _parse_project(project, content_version, chapters) return resource_formats, project_formats", "format = _make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) formats.append(format) else: #", "chapters) return resource_formats, project_formats def _parse_resource(resource, content_version): \"\"\" Converts a", ":return: \"\"\" if isinstance(target, basestring) or isinstance(target, str): result =", "black_list: if key in vars: del vars[key] # TRICKY: using", "= quality format = _make_format(source_version=source_version, media_version=media_version, quality=quality, media=media, expansion_vars=expansion_vars) chapters", "replacements): \"\"\" Replaces all the dict keys found in the", "but it's a common misunderstanding so we allow it. vars['latest']", "if chapters: return chapters return None def _parse_project_chapter(chapter_url, chapters): \"\"\"", "'{}'.format(source_version), 'version': '{}'.format(media_version), 'contributor': media['contributor'], 'url': _expand_keys(media['url'], expansion_vars), 'signature': '',", "object into formats usable in the catalog :param project: the", "[ 'signing.sign_given_url' ] } formats.append(format) return formats def _make_expansion_variables(media_block, content_version):", "quality=None, media=media, expansion_vars=expansion_vars) formats.append(format) return formats def _make_format(source_version, media_version, quality,", "a book # only book RCs can have chapter formats", "return chapters return None def _parse_project_chapter(chapter_url, chapters): \"\"\" Generates chapter", "chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars) if chapters: format['chapters'] = chapters", "replacements: if not isinstance(replacements[key], list): result = re.sub(r'{\\s*' + key", "give a well formatted list of chapter ids and check", "we give a well formatted list of chapter ids and", ":param target: :param replacements: :return: \"\"\" if isinstance(target, basestring) or", "each quality for quality in media['quality']: expansion_vars['quality'] = quality format", "expansion_vars) if chapters: format['chapters'] = chapters formats.append(format) return formats def", "a media object into formats usable in the catalog :param", "chapters: list :return: \"\"\" # TODO: this requires that we", "be used in the formats :param chapters: a list of", "= {} if 'resource' in media: resource_formats = _parse_resource(media['resource'], content_version)", "formatted list of chapter ids and check if the Rc", "the spec, # but it's a common misunderstanding so we", "empty 
projects to still publish media. for x in range(1,", "routinely conditionally prepare chapters in multiple places this handles it", "# strip black listed keys black_list = ['url', 'chapter_url'] for", "the current version of the source content :type content_version: string", "of chapter ids and check if the Rc is a", ":type chapters: list :return: \"\"\" # TODO: this requires that", "project_chapters: chapters = project_chapters[project_id] project_formats[project_id] = _parse_project(project, content_version, chapters) return", "into formats usable in the catalog :param media: the media", "a well formatted list of chapter ids and check if", "strip black listed keys black_list = ['url', 'chapter_url'] for key", "in chapters: format = { 'size': 0, 'length': 0, 'modified':", "if not isinstance(replacements[key], list): result = re.sub(r'{\\s*' + key +", "len(media['quality']) > 0: # build format for each quality for", "media_version, quality, media, expansion_vars): format = { 'format': '', 'modified':", "the url template that will be used in the formats", "of replacements but received {}'.format(type(replacements))) for key in replacements: if", "to still publish media. for x in range(1, 51): #", "for key in replacements: if not isinstance(replacements[key], list): result =", "if project_id in project_chapters: chapters = project_chapters[project_id] project_formats[project_id] = _parse_project(project,", "copy def parse_media(media, content_version, project_chapters): \"\"\" Converts a media object", "dictionary of variables that may be expanded in the chapter", "def _parse_project(project, content_version, chapters_ids): \"\"\" Converts a project media object", "dictionary of project formats \"\"\" resource_formats = [] project_formats =", "_expand_keys(resource['version'], {'latest': content_version}) formats = [] if 'media' in resource:", "delimited by brackets {} :param target: :param replacements: :return: \"\"\"", "string :param project_chapters: a dictionary of project chapters :type project_chapters:", "'signature': '', 'build_rules': [ 'signing.sign_given_url' ] } formats.append(format) return formats", "keys black_list = ['url', 'chapter_url'] for key in black_list: if", "formats \"\"\" source_version = _expand_keys(resource['version'], {'latest': content_version}) formats = []", "common misunderstanding so we allow it. 
vars['latest'] = '{}'.format(content_version) return", "media_block: :param content_version: :return: \"\"\" vars = copy.copy(media_block) # strip", "the catalog :param project: the media object :type project: dict", "the chapter url :return: \"\"\" if 'chapter_url' in media: chapter_url", "if key in vars: del vars[key] # TRICKY: using `latest`", "'modified': '', 'identifier': chapter_id, 'url': _expand_keys(chapter_url, {'chapter': chapter_id}), 'signature': '',", "resource formats and dictionary of project formats \"\"\" resource_formats =", "chapters.append(str(x).zfill(2)) if project_id in project_chapters: chapters = project_chapters[project_id] project_formats[project_id] =", "in the project :type chapters_ids: list :return: a list of", "return format def _parse_project(project, content_version, chapters_ids): \"\"\" Converts a project", "\"\"\" Replaces all the dict keys found in the string", "return result elif isinstance(target, int): return target else: raise Exception('Invalid", "chapters: a list of chapter ids :param expansion_vars: a dictionary", "of the source content :type content_version: string :return: a list", "if 'media' in resource: for media in resource['media']: media_version =", "\"\"\" Generates chapter formats for use in the catalog :param", "# build format for each quality for quality in media['quality']:", "_expand_keys(chapter_url, {'chapter': chapter_id}), 'signature': '', 'build_rules': [ 'signing.sign_given_url' ] }", "Exception('Invalid replacement target \"{}\". Expected string but received {}'.format(target, type(target)))", "the catalog :param chapter_url: the url template that will be", "def _parse_resource(resource, content_version): \"\"\" Converts a resource media object into", "chapter_id in chapters: format = { 'size': 0, 'length': 0,", "format for each quality for quality in media['quality']: expansion_vars['quality'] =", "] } formats.append(format) return formats def _make_expansion_variables(media_block, content_version): \"\"\" Creates", "# build single format format = _make_format(source_version=source_version, media_version=media_version, quality=None, media=media,", "Generates chapter formats for use in the catalog :param chapter_url:", "the dict keys found in the string with the dict", ":param chapters_ids: a list of chapter identifiers in the project", "media in project['media']: media_version = _expand_keys(media['version'], {'latest': content_version}) expansion_vars =", "catalog :param chapter_url: the url template that will be used", "if quality: format['quality'] = quality return format def _parse_project(project, content_version,", "a dictionary of expansion variables for media items. 
:param self:", "expanded in the chapter url :return: \"\"\" if 'chapter_url' in", "chapter formats for use in the catalog :param chapter_url: the", "chapters formats.append(format) return formats def _prepare_chapter_formats(media, chapters, expansion_vars): \"\"\" This", "# TRICKY: obs projects always have 50 chapters # This", "} formats.append(format) return formats def _make_expansion_variables(media_block, content_version): \"\"\" Creates a", "= copy.copy(media_block) # strip black listed keys black_list = ['url',", "media in resource['media']: media_version = _expand_keys(media['version'], {'latest': content_version}) expansion_vars =", "def parse_media(media, content_version, project_chapters): \"\"\" Converts a media object into", "content :type content_version: string :param chapters_ids: a list of chapter", "or isinstance(target, str): result = target if not isinstance(replacements, dict):", "expansion_vars: a dictionary of variables that may be expanded in", "re.sub(r'{\\s*' + key + '\\s*}', '{}'.format(replacements[key]), result) return result elif", "_make_format(source_version=source_version, media_version=media_version, quality=None, media=media, expansion_vars=expansion_vars) chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)", "'', 'size': 0, 'source_version': '{}'.format(source_version), 'version': '{}'.format(media_version), 'contributor': media['contributor'], 'url':", "spec, # but it's a common misunderstanding so we allow", "an expansion variable in urls is not explicitly stated in", "resource: dict :param content_version: the current version of the source" ]
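The row ending above appears to be a list of overlapping word shingles taken from a Python module that turns media definitions into catalog format entries (parse_media, _parse_resource, _parse_project, _expand_keys and related helpers). The fragments describe one small helper clearly enough to sketch: a key-expansion routine that substitutes {key} placeholders in a string with values from a replacement dict, passes integers through, and rejects other targets. The sketch below is only an illustrative reconstruction pieced together from those fragments, not the original source; the public name expand_keys, the use of TypeError, the re.escape call and the example URL are assumptions introduced here.

import re


def expand_keys(target, replacements):
    """Substitute {key} placeholders in a string with values from a dict.

    Illustrative reconstruction of the _expand_keys helper described in the
    fragments above: string targets get their bracket-delimited keys expanded,
    integer targets pass through unchanged, anything else raises an error.
    """
    if isinstance(target, str):
        if not isinstance(replacements, dict):
            raise TypeError('Expected dictionary of replacements but received {}'.format(type(replacements)))
        result = target
        for key, value in replacements.items():
            if isinstance(value, list):
                continue  # list values are skipped, as in the original fragments
            # placeholders may be written as {key} or { key }
            result = re.sub(r'{\s*' + re.escape(key) + r'\s*}', str(value), result)
        return result
    if isinstance(target, int):
        return target
    raise TypeError('Invalid replacement target "{}" ({})'.format(target, type(target)))


# Hypothetical usage, mirroring how the parser expands a 'latest' version placeholder:
print(expand_keys('https://example.org/media/v{ latest }/obs.json', {'latest': '4'}))
# -> https://example.org/media/v4/obs.json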
[ "WaitingTask): self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作,", "# todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not cond().run(self, transition): continue if transition.task:", "else: return False def state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation", "no need to set\") return None state_relation.waiting = True state_relation.save()", "is no need to set\") return None state_relation.waiting = False", "def workflow_is_finished(self): state = self.get_state() if not state.transitions.all(): return True", "BaseTask class WorkflowMixin(object): \"\"\"Mixin class to make objects workflow aware.", "workflow anymore (it might have one via its content type).", "to the workflow's initial state. **Parameters:** workflow The workflow which", "the object gets the passed workflow and the state is", "def remove_workflow(self): \"\"\"Removes the workflow from the object. After this", "return True else: return False def state_is_waiting(self): return utils.get_state_relation(self).waiting def", "make objects workflow aware. \"\"\" def get_workflow(self): \"\"\"Returns the current", "waiting task setting. if transition.task and isinstance(task, WaitingTask): self.state_set_waiting() #", "when the WaitingTask has finished.\") state = self.get_state() transitions =", "do_transition(self, transition, user): \"\"\"Processes the passed transition (if allowed). \"\"\"", "transition (if allowed). \"\"\" return utils.do_transition(self, transition, user) def do_next_state(self):", "def state_set_waiting(self): state_relation = utils.get_state_relation(self) if state_relation.waiting: print(\"there is no", "workflow nothing happens. Otherwise the object gets the passed workflow", "is no need to set\") return None state_relation.waiting = True", "workflow) def get_state(self): \"\"\"Returns the current workflow state of the", "if not next_state_instance.manual: return self.do_next_state() return True def set_log(self, state,", "via its content type). \"\"\" return utils.remove_workflow_from_object(self) def set_workflow(self, workflow):", "state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation = utils.get_state_relation(self) if not", "state of the object. \"\"\" return utils.get_state(self) def set_state(self, state):", "\"\"\" return utils.set_state(self, state) def set_initial_state(self): \"\"\"Sets the initial state", "state.transitions.all(): return True else: return False def state_is_waiting(self): return utils.get_state_relation(self).waiting", "the local workflow for the object. If the object has", "If the object has already the given workflow nothing happens.", "self.set_state(next_state_instance) # info:This is the waiting task setting. if transition.task", "True def set_log(self, state, source_state=None, transition=None): return utils.set_log(self, state, source_state,", "\"is the end state\") return False for transition in transitions:", "def do_next_state(self): if self.state_is_waiting(): print(\"state is waiting! please use method", "not Basetask or WaitingTask instance\") task.run(self, transition) next_state_instance = transition.destination", "passed transition (if allowed). \"\"\" return utils.do_transition(self, transition, user) def", "workflow for the object. If the object has already the", "given workflow nothing happens. 
Otherwise the object gets the passed", "print(state.name, \"is the end state\") return False for transition in", "object gets the passed workflow and the state is set", "def set_initial_state(self): \"\"\"Sets the initial state of the current workflow", "def state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation = utils.get_state_relation(self) if", "has already the given workflow nothing happens. Otherwise the object", "# todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask,", "objects workflow aware. \"\"\" def get_workflow(self): \"\"\"Returns the current workflow", "# info:这里代表状态节点是最后的一层了 if not transitions: print(state.name, \"is the end state\")", "transition.condition: cond = utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not cond().run(self,", "transition in transitions: if transition.condition: cond = utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个,", "called the object has no *own* workflow anymore (it might", "return False def state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation =", "which should be set to the object. Can be a", "transitions = state.transitions.all() # info:这里代表状态节点是最后的一层了 if not transitions: print(state.name, \"is", "be a Workflow instance or a string with the workflow", "state\") return False for transition in transitions: if transition.condition: cond", "print(\"state is waiting! please use method .state_end_waiting() when the WaitingTask", "set\") return None state_relation.waiting = False state_relation.save() def state_set_waiting(self): state_relation", "gets the passed workflow. \"\"\" return utils.set_workflow_for_object(self, workflow) def get_state(self):", "the object has already the given workflow nothing happens. Otherwise", "# -*- coding:utf-8 -*- # create_time: 2019/8/5 16:02 # __author__", "return False for transition in transitions: if transition.condition: cond =", "workflow aware. \"\"\" def get_workflow(self): \"\"\"Returns the current workflow of", "the passed workflow and the state is set to the", "the passed workflow to the object. This will set the", "-*- # create_time: 2019/8/5 16:02 # __author__ = 'brad' from", "need to set\") return None state_relation.waiting = False state_relation.save() def", "utils.get_state(self) def set_state(self, state): \"\"\"Sets the workflow state of the", "return self.do_next_state() return True def set_log(self, state, source_state=None, transition=None): return", "is the waiting task setting. if transition.task and isinstance(task, WaitingTask):", "create_time: 2019/8/5 16:02 # __author__ = 'brad' from . import", "Can be a Workflow instance or a string with the", "is not Basetask or WaitingTask instance\") task.run(self, transition) next_state_instance =", "workflow which should be set to the object. Can be", "WaitingTask, BaseTask class WorkflowMixin(object): \"\"\"Mixin class to make objects workflow", "__author__ = 'brad' from . import utils from .tasks.base import", "utils.set_log(self, state, source_state, transition) def get_log(self): return utils.get_log(self) def workflow_is_finished(self):", "if not cond().run(self, transition): continue if transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善", "raise TypeError(\"This task is not Basetask or WaitingTask instance\") task.run(self,", "state of the current workflow to the object. 
\"\"\" return", "the object. \"\"\" return utils.set_state(self, state) def set_initial_state(self): \"\"\"Sets the", "'brad' from . import utils from .tasks.base import WaitingTask, BaseTask", "object. \"\"\" return utils.get_state(self) def set_state(self, state): \"\"\"Sets the workflow", "if not transitions: print(state.name, \"is the end state\") return False", "get_state(self): \"\"\"Returns the current workflow state of the object. \"\"\"", "\"\"\" def get_workflow(self): \"\"\"Returns the current workflow of the object.", "the workflow's initial state. **Parameters:** workflow The workflow which should", "info:这里代表状态节点是最后的一层了 if not transitions: print(state.name, \"is the end state\") return", "16:02 # __author__ = 'brad' from . import utils from", "to the object. Can be a Workflow instance or a", "instance\") task.run(self, transition) next_state_instance = transition.destination self.set_state(next_state_instance) # info:This is", "if transition.task and isinstance(task, WaitingTask): self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name,", "# create_time: 2019/8/5 16:02 # __author__ = 'brad' from .", "of the object. \"\"\" return utils.set_state(self, state) def set_initial_state(self): \"\"\"Sets", "state_relation = utils.get_state_relation(self) if state_relation.waiting: print(\"there is no need to", "\"\"\"Sets the initial state of the current workflow to the", "todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask, WaitingTask)):", "passed workflow to the object. This will set the local", "False for transition in transitions: if transition.condition: cond = utils.import_from_string(transition.condition)", "state_relation.waiting: print(\"there is no need to set\") return None state_relation.waiting", "task = utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask, WaitingTask)): raise TypeError(\"This", "transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if not next_state_instance.manual: return self.do_next_state()", "a Workflow instance or a string with the workflow name.", "workflow from the object. After this function has been called", "end state\") return False for transition in transitions: if transition.condition:", "= state.transitions.all() # info:这里代表状态节点是最后的一层了 if not transitions: print(state.name, \"is the", "workflow and the state is set to the workflow's initial", "use method .state_end_waiting() when the WaitingTask has finished.\") state =", "= utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask, WaitingTask)): raise TypeError(\"This task", "its content type). \"\"\" return utils.remove_workflow_from_object(self) def set_workflow(self, workflow): \"\"\"Sets", "= utils.get_state_relation(self) if state_relation.waiting: print(\"there is no need to set\")", "is set to the workflow's initial state. **Parameters:** workflow The", "transition, user) def do_next_state(self): if self.state_is_waiting(): print(\"state is waiting! please", "state of the object. \"\"\" return utils.set_state(self, state) def set_initial_state(self):", "have one via its content type). \"\"\" return utils.remove_workflow_from_object(self) def", "initial state of the current workflow to the object. \"\"\"", "object has already the given workflow nothing happens. Otherwise the", "\"\"\"Mixin class to make objects workflow aware. 
\"\"\" def get_workflow(self):", "transition.destination self.set_state(next_state_instance) # info:This is the waiting task setting. if", "utils.remove_workflow_from_object(self) def set_workflow(self, workflow): \"\"\"Sets the passed workflow to the", "# todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if not next_state_instance.manual: return self.do_next_state() return", "if self.state_is_waiting(): print(\"state is waiting! please use method .state_end_waiting() when", "has no *own* workflow anymore (it might have one via", "the given workflow nothing happens. Otherwise the object gets the", "the passed workflow. \"\"\" return utils.set_workflow_for_object(self, workflow) def get_state(self): \"\"\"Returns", "return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation = utils.get_state_relation(self) if not state_relation.waiting:", "# info:This is the waiting task setting. if transition.task and", "workflow): \"\"\"Sets the passed workflow to the object. This will", "\"\"\"Returns the current workflow state of the object. \"\"\" return", "current workflow to the object. \"\"\" return self.set_state(self.get_workflow().initial_state) def do_transition(self,", "import WaitingTask, BaseTask class WorkflowMixin(object): \"\"\"Mixin class to make objects", "state, source_state=None, transition=None): return utils.set_log(self, state, source_state, transition) def get_log(self):", "the object. Can be a Workflow instance or a string", "= transition.destination self.set_state(next_state_instance) # info:This is the waiting task setting.", "self.get_state() if not state.transitions.all(): return True else: return False def", "def state_end_waiting(self): state_relation = utils.get_state_relation(self) if not state_relation.waiting: print(\"there is", "workflow_is_finished(self): state = self.get_state() if not state.transitions.all(): return True else:", "utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation = utils.get_state_relation(self) if not state_relation.waiting: print(\"there", "state.transitions.all() # info:这里代表状态节点是最后的一层了 if not transitions: print(state.name, \"is the end", "\"\"\" return utils.get_state(self) def set_state(self, state): \"\"\"Sets the workflow state", "False def state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self): state_relation = utils.get_state_relation(self)", "with the workflow name. obj The object which gets the", "which gets the passed workflow. \"\"\" return utils.set_workflow_for_object(self, workflow) def", "if not isinstance(task, (BaseTask, WaitingTask)): raise TypeError(\"This task is not", "\"\"\"Sets the passed workflow to the object. This will set", "source_state=None, transition=None): return utils.set_log(self, state, source_state, transition) def get_log(self): return", "= self.get_state() if not state.transitions.all(): return True else: return False", "state is set to the workflow's initial state. **Parameters:** workflow", "return utils.set_state(self, state) def set_initial_state(self): \"\"\"Sets the initial state of", "return utils.remove_workflow_from_object(self) def set_workflow(self, workflow): \"\"\"Sets the passed workflow to", "workflow of the object. \"\"\" return utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes", "anymore (it might have one via its content type). \"\"\"", "set to the object. 
Can be a Workflow instance or", "return True def set_log(self, state, source_state=None, transition=None): return utils.set_log(self, state,", "source_state, transition) def get_log(self): return utils.get_log(self) def workflow_is_finished(self): state =", "should be set to the object. Can be a Workflow", "the current workflow state of the object. \"\"\" return utils.get_state(self)", "not state_relation.waiting: print(\"there is no need to set\") return None", "no *own* workflow anymore (it might have one via its", "the workflow from the object. After this function has been", "for transition in transitions: if transition.condition: cond = utils.import_from_string(transition.condition) #", "*own* workflow anymore (it might have one via its content", "the workflow name. obj The object which gets the passed", "setting. if transition.task and isinstance(task, WaitingTask): self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name,", "the end state\") return False for transition in transitions: if", "the state is set to the workflow's initial state. **Parameters:**", "in transitions: if transition.condition: cond = utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了", "waiting! please use method .state_end_waiting() when the WaitingTask has finished.\")", "string with the workflow name. obj The object which gets", "state_relation.waiting = False state_relation.save() def state_set_waiting(self): state_relation = utils.get_state_relation(self) if", "coding:utf-8 -*- # create_time: 2019/8/5 16:02 # __author__ = 'brad'", "transition): continue if transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)()", "instance or a string with the workflow name. obj The", "the object. After this function has been called the object", "utils from .tasks.base import WaitingTask, BaseTask class WorkflowMixin(object): \"\"\"Mixin class", "if not state.transitions.all(): return True else: return False def state_is_waiting(self):", "continue if transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)() if", "return utils.do_transition(self, transition, user) def do_next_state(self): if self.state_is_waiting(): print(\"state is", "info:This is the waiting task setting. if transition.task and isinstance(task,", "will set the local workflow for the object. If the", "transition) def get_log(self): return utils.get_log(self) def workflow_is_finished(self): state = self.get_state()", "self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行", "utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask, WaitingTask)): raise TypeError(\"This task is", "Workflow instance or a string with the workflow name. obj", "state_set_waiting(self): state_relation = utils.get_state_relation(self) if state_relation.waiting: print(\"there is no need", "\"\"\" return utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes the workflow from the", "self.state_is_waiting(): print(\"state is waiting! please use method .state_end_waiting() when the", "workflow's initial state. **Parameters:** workflow The workflow which should be", "= False state_relation.save() def state_set_waiting(self): state_relation = utils.get_state_relation(self) if state_relation.waiting:", "of the object. 
\"\"\" return utils.get_state(self) def set_state(self, state): \"\"\"Sets", "WaitingTask has finished.\") state = self.get_state() transitions = state.transitions.all() #", "utils.get_log(self) def workflow_is_finished(self): state = self.get_state() if not state.transitions.all(): return", "return utils.set_workflow_for_object(self, workflow) def get_state(self): \"\"\"Returns the current workflow state", "remove_workflow(self): \"\"\"Removes the workflow from the object. After this function", "object. This will set the local workflow for the object.", "cond = utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not cond().run(self, transition):", "After this function has been called the object has no", "if transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)() if not", "def get_workflow(self): \"\"\"Returns the current workflow of the object. \"\"\"", "is waiting! please use method .state_end_waiting() when the WaitingTask has", "a string with the workflow name. obj The object which", ".state_end_waiting() when the WaitingTask has finished.\") state = self.get_state() transitions", "return utils.get_state(self) def set_state(self, state): \"\"\"Sets the workflow state of", "todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not cond().run(self, transition): continue if transition.task: #", "utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes the workflow from the object. After", "WaitingTask instance\") task.run(self, transition) next_state_instance = transition.destination self.set_state(next_state_instance) # info:This", ".tasks.base import WaitingTask, BaseTask class WorkflowMixin(object): \"\"\"Mixin class to make", "-*- coding:utf-8 -*- # create_time: 2019/8/5 16:02 # __author__ =", "function has been called the object has no *own* workflow", "\"\"\" return utils.do_transition(self, transition, user) def do_next_state(self): if self.state_is_waiting(): print(\"state", "(it might have one via its content type). \"\"\" return", "state = self.get_state() if not state.transitions.all(): return True else: return", "只要是设置为下一个状态不需要手动操作, 就在这里执行 if not next_state_instance.manual: return self.do_next_state() return True def", "get_workflow(self): \"\"\"Returns the current workflow of the object. \"\"\" return", "state) def set_initial_state(self): \"\"\"Sets the initial state of the current", "aware. \"\"\" def get_workflow(self): \"\"\"Returns the current workflow of the", "transition) next_state_instance = transition.destination self.set_state(next_state_instance) # info:This is the waiting", "set_state(self, state): \"\"\"Sets the workflow state of the object. \"\"\"", "True else: return False def state_is_waiting(self): return utils.get_state_relation(self).waiting def state_end_waiting(self):", "and the state is set to the workflow's initial state.", "WaitingTask)): raise TypeError(\"This task is not Basetask or WaitingTask instance\")", "info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if not", "就跳出轮询设置状态了 if not cond().run(self, transition): continue if transition.task: # todo:task是顺序还是异步执行,", "set to the workflow's initial state. **Parameters:** workflow The workflow", "utils.set_state(self, state) def set_initial_state(self): \"\"\"Sets the initial state of the", "WorkflowMixin(object): \"\"\"Mixin class to make objects workflow aware. 
\"\"\" def", "= utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not cond().run(self, transition): continue", "get_log(self): return utils.get_log(self) def workflow_is_finished(self): state = self.get_state() if not", "class WorkflowMixin(object): \"\"\"Mixin class to make objects workflow aware. \"\"\"", "TypeError(\"This task is not Basetask or WaitingTask instance\") task.run(self, transition)", "the object. If the object has already the given workflow", "obj The object which gets the passed workflow. \"\"\" return", "object has no *own* workflow anymore (it might have one", "This will set the local workflow for the object. If", "None state_relation.waiting = False state_relation.save() def state_set_waiting(self): state_relation = utils.get_state_relation(self)", "or a string with the workflow name. obj The object", "utils.get_state_relation(self) if not state_relation.waiting: print(\"there is no need to set\")", "还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)() if not isinstance(task, (BaseTask, WaitingTask)): raise", "passed workflow and the state is set to the workflow's", "\"\"\" return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user): \"\"\"Processes the passed", "False state_relation.save() def state_set_waiting(self): state_relation = utils.get_state_relation(self) if state_relation.waiting: print(\"there", "from the object. After this function has been called the", "if transition.condition: cond = utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not", "current workflow of the object. \"\"\" return utils.get_workflow(self) def remove_workflow(self):", "\"\"\" return utils.set_workflow_for_object(self, workflow) def get_state(self): \"\"\"Returns the current workflow", "return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user): \"\"\"Processes the passed transition", "self.do_next_state() return True def set_log(self, state, source_state=None, transition=None): return utils.set_log(self,", "= self.get_state() transitions = state.transitions.all() # info:这里代表状态节点是最后的一层了 if not transitions:", "and isinstance(task, WaitingTask): self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) #", "the WaitingTask has finished.\") state = self.get_state() transitions = state.transitions.all()", "(BaseTask, WaitingTask)): raise TypeError(\"This task is not Basetask or WaitingTask", "current workflow state of the object. \"\"\" return utils.get_state(self) def", "set the local workflow for the object. If the object", "the object. \"\"\" return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user): \"\"\"Processes", "transition, user): \"\"\"Processes the passed transition (if allowed). \"\"\" return", "do_next_state(self): if self.state_is_waiting(): print(\"state is waiting! please use method .state_end_waiting()", "def set_log(self, state, source_state=None, transition=None): return utils.set_log(self, state, source_state, transition)", "this function has been called the object has no *own*", "return utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes the workflow from the object.", "the object. \"\"\" return utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes the workflow", "the workflow state of the object. 
\"\"\" return utils.set_state(self, state)", "one via its content type). \"\"\" return utils.remove_workflow_from_object(self) def set_workflow(self,", "user): \"\"\"Processes the passed transition (if allowed). \"\"\" return utils.do_transition(self,", "or WaitingTask instance\") task.run(self, transition) next_state_instance = transition.destination self.set_state(next_state_instance) #", "has been called the object has no *own* workflow anymore", "import utils from .tasks.base import WaitingTask, BaseTask class WorkflowMixin(object): \"\"\"Mixin", "return utils.get_log(self) def workflow_is_finished(self): state = self.get_state() if not state.transitions.all():", "if not state_relation.waiting: print(\"there is no need to set\") return", "passed workflow. \"\"\" return utils.set_workflow_for_object(self, workflow) def get_state(self): \"\"\"Returns the", "The object which gets the passed workflow. \"\"\" return utils.set_workflow_for_object(self,", "isinstance(task, (BaseTask, WaitingTask)): raise TypeError(\"This task is not Basetask or", "next_state_instance.manual: return self.do_next_state() return True def set_log(self, state, source_state=None, transition=None):", "the waiting task setting. if transition.task and isinstance(task, WaitingTask): self.state_set_waiting()", "print(\"there is no need to set\") return None state_relation.waiting =", "state): \"\"\"Sets the workflow state of the object. \"\"\" return", "to set\") return None state_relation.waiting = False state_relation.save() def state_set_waiting(self):", "\"\"\"Sets the workflow state of the object. \"\"\" return utils.set_state(self,", "the current workflow to the object. \"\"\" return self.set_state(self.get_workflow().initial_state) def", "= 'brad' from . import utils from .tasks.base import WaitingTask,", "not state.transitions.all(): return True else: return False def state_is_waiting(self): return", "workflow to the object. This will set the local workflow", "been called the object has no *own* workflow anymore (it", "\"\"\"Processes the passed transition (if allowed). \"\"\" return utils.do_transition(self, transition,", "the object. This will set the local workflow for the", "return utils.set_log(self, state, source_state, transition) def get_log(self): return utils.get_log(self) def", "has finished.\") state = self.get_state() transitions = state.transitions.all() # info:这里代表状态节点是最后的一层了", "not next_state_instance.manual: return self.do_next_state() return True def set_log(self, state, source_state=None,", "workflow to the object. \"\"\" return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition,", "set_workflow(self, workflow): \"\"\"Sets the passed workflow to the object. This", "of the object. \"\"\" return utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes the", "happens. Otherwise the object gets the passed workflow and the", "self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if not next_state_instance.manual:", "already the given workflow nothing happens. Otherwise the object gets", "object. 
\"\"\" return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user): \"\"\"Processes the", "todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if not next_state_instance.manual: return self.do_next_state() return True", "utils.get_state_relation(self) if state_relation.waiting: print(\"there is no need to set\") return", "state = self.get_state() transitions = state.transitions.all() # info:这里代表状态节点是最后的一层了 if not", "type). \"\"\" return utils.remove_workflow_from_object(self) def set_workflow(self, workflow): \"\"\"Sets the passed", "cond().run(self, transition): continue if transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task =", "state_end_waiting(self): state_relation = utils.get_state_relation(self) if not state_relation.waiting: print(\"there is no", "return None state_relation.waiting = False state_relation.save() def state_set_waiting(self): state_relation =", "**Parameters:** workflow The workflow which should be set to the", "nothing happens. Otherwise the object gets the passed workflow and", "def set_workflow(self, workflow): \"\"\"Sets the passed workflow to the object.", "set_initial_state(self): \"\"\"Sets the initial state of the current workflow to", "state, source_state, transition) def get_log(self): return utils.get_log(self) def workflow_is_finished(self): state", "The workflow which should be set to the object. Can", "the initial state of the current workflow to the object.", "be set to the object. Can be a Workflow instance", "allowed). \"\"\" return utils.do_transition(self, transition, user) def do_next_state(self): if self.state_is_waiting():", "isinstance(task, WaitingTask): self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo:这个是遍历操作,", "def get_state(self): \"\"\"Returns the current workflow state of the object.", "local workflow for the object. If the object has already", "for the object. If the object has already the given", "transition.task and isinstance(task, WaitingTask): self.state_set_waiting() # info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name)", "user) def do_next_state(self): if self.state_is_waiting(): print(\"state is waiting! please use", "就在这里执行 if not next_state_instance.manual: return self.do_next_state() return True def set_log(self,", "(if allowed). \"\"\" return utils.do_transition(self, transition, user) def do_next_state(self): if", "finished.\") state = self.get_state() transitions = state.transitions.all() # info:这里代表状态节点是最后的一层了 if", "2019/8/5 16:02 # __author__ = 'brad' from . import utils", "object. \"\"\" return utils.set_state(self, state) def set_initial_state(self): \"\"\"Sets the initial", "if state_relation.waiting: print(\"there is no need to set\") return None", "def set_state(self, state): \"\"\"Sets the workflow state of the object.", "to make objects workflow aware. \"\"\" def get_workflow(self): \"\"\"Returns the", "workflow state of the object. \"\"\" return utils.set_state(self, state) def", "transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task = utils.import_from_string(transition.task)() if not isinstance(task,", "self.get_state() transitions = state.transitions.all() # info:这里代表状态节点是最后的一层了 if not transitions: print(state.name,", "object. After this function has been called the object has", "the current workflow of the object. \"\"\" return utils.get_workflow(self) def", "to the object. 
\"\"\" return self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user):", "not isinstance(task, (BaseTask, WaitingTask)): raise TypeError(\"This task is not Basetask", "set_log(self, state, source_state=None, transition=None): return utils.set_log(self, state, source_state, transition) def", "workflow The workflow which should be set to the object.", "gets the passed workflow and the state is set to", "Basetask or WaitingTask instance\") task.run(self, transition) next_state_instance = transition.destination self.set_state(next_state_instance)", "Otherwise the object gets the passed workflow and the state", "object. \"\"\" return utils.get_workflow(self) def remove_workflow(self): \"\"\"Removes the workflow from", "\"\"\" return utils.remove_workflow_from_object(self) def set_workflow(self, workflow): \"\"\"Sets the passed workflow", "from .tasks.base import WaitingTask, BaseTask class WorkflowMixin(object): \"\"\"Mixin class to", "utils.set_workflow_for_object(self, workflow) def get_state(self): \"\"\"Returns the current workflow state of", "not transitions: print(state.name, \"is the end state\") return False for", ". import utils from .tasks.base import WaitingTask, BaseTask class WorkflowMixin(object):", "the passed transition (if allowed). \"\"\" return utils.do_transition(self, transition, user)", "state_relation.save() def state_set_waiting(self): state_relation = utils.get_state_relation(self) if state_relation.waiting: print(\"there is", "not cond().run(self, transition): continue if transition.task: # todo:task是顺序还是异步执行, 还是有前向倚赖,这个需要确定完善 task", "# __author__ = 'brad' from . import utils from .tasks.base", "next_state_instance = transition.destination self.set_state(next_state_instance) # info:This is the waiting task", "source_state=state.name, transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if not next_state_instance.manual: return", "utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if not cond().run(self, transition): continue if", "self.set_state(self.get_workflow().initial_state) def do_transition(self, transition, user): \"\"\"Processes the passed transition (if", "workflow name. obj The object which gets the passed workflow.", "method .state_end_waiting() when the WaitingTask has finished.\") state = self.get_state()", "name. obj The object which gets the passed workflow. \"\"\"", "state_relation = utils.get_state_relation(self) if not state_relation.waiting: print(\"there is no need", "object which gets the passed workflow. \"\"\" return utils.set_workflow_for_object(self, workflow)", "class to make objects workflow aware. \"\"\" def get_workflow(self): \"\"\"Returns", "please use method .state_end_waiting() when the WaitingTask has finished.\") state", "= utils.get_state_relation(self) if not state_relation.waiting: print(\"there is no need to", "task is not Basetask or WaitingTask instance\") task.run(self, transition) next_state_instance", "workflow. \"\"\" return utils.set_workflow_for_object(self, workflow) def get_state(self): \"\"\"Returns the current", "transitions: print(state.name, \"is the end state\") return False for transition", "task.run(self, transition) next_state_instance = transition.destination self.set_state(next_state_instance) # info:This is the", "task setting. if transition.task and isinstance(task, WaitingTask): self.state_set_waiting() # info:记录日志", "object. Can be a Workflow instance or a string with", "might have one via its content type). 
\"\"\" return utils.remove_workflow_from_object(self)", "utils.do_transition(self, transition, user) def do_next_state(self): if self.state_is_waiting(): print(\"state is waiting!", "of the current workflow to the object. \"\"\" return self.set_state(self.get_workflow().initial_state)", "transitions: if transition.condition: cond = utils.import_from_string(transition.condition) # todo:目前这里是轮询到条件正确的一个, 就跳出轮询设置状态了 if", "initial state. **Parameters:** workflow The workflow which should be set", "content type). \"\"\" return utils.remove_workflow_from_object(self) def set_workflow(self, workflow): \"\"\"Sets the", "the object. \"\"\" return utils.get_state(self) def set_state(self, state): \"\"\"Sets the", "def get_log(self): return utils.get_log(self) def workflow_is_finished(self): state = self.get_state() if", "transition=None): return utils.set_log(self, state, source_state, transition) def get_log(self): return utils.get_log(self)", "to the object. This will set the local workflow for", "the object has no *own* workflow anymore (it might have", "\"\"\"Removes the workflow from the object. After this function has", "workflow state of the object. \"\"\" return utils.get_state(self) def set_state(self,", "\"\"\"Returns the current workflow of the object. \"\"\" return utils.get_workflow(self)", "# info:记录日志 self.set_log(state=next_state_instance.name, source_state=state.name, transition=transition.name) # todo:这个是遍历操作, 只要是设置为下一个状态不需要手动操作, 就在这里执行 if", "no need to set\") return None state_relation.waiting = False state_relation.save()", "object. If the object has already the given workflow nothing", "state. **Parameters:** workflow The workflow which should be set to", "def do_transition(self, transition, user): \"\"\"Processes the passed transition (if allowed).", "from . import utils from .tasks.base import WaitingTask, BaseTask class" ]
[ "# update duration setattr(instance, self.duration_field, duration) def formfield(self, **kwargs): #", "name, **kwargs) def check(self, **kwargs): errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend())", "def formfield(self, **kwargs): # use normal FileFieldWidget for now return", "VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True): # Clear the video info", "def __init__(self, verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field = duration_field super(VideoField,", "to_python(self, data): # use FileField method return super(ImageField, self).to_python(data) def", "setattr(instance, self.duration_field, duration) def formfield(self, **kwargs): # use normal FileFieldWidget", "if not self.duration_field: return # Nothing to update if we", "self).check(**kwargs) errors.extend(self._check_backend()) return errors def _check_backend(self): backend = get_backend_class() return", "# write `width` and `height` super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs)", "return errors def _check_backend(self): backend = get_backend_class() return backend.check() def", "pass class VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True): # Clear the", "return backend.check() def to_python(self, data): # use FileField method return", "file if not _file._committed: return # write `width` and `height`", "super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors def _check_backend(self): backend = get_backend_class()", "not force: return if getattr(instance, self.duration_field) and not force: return", "errors def _check_backend(self): backend = get_backend_class() return backend.check() def to_python(self,", "duration) def formfield(self, **kwargs): # use normal FileFieldWidget for now", "_ from .backends import get_backend_class from .files import VideoFile class", "# we need a real file if not _file._committed: return", "_file = getattr(instance, self.attname) # we need a real file", "hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class =", "force: return # get duration if file is defined duration", "self.duration_field, duration) def formfield(self, **kwargs): # use normal FileFieldWidget for", "__init__(self, verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field = duration_field super(VideoField, self).__init__(verbose_name,", "super(ImageField, self).to_python(data) def update_dimension_fields(self, instance, force=False, *args, **kwargs): _file =", "*args, **kwargs) if not self.duration_field: return # Nothing to update", "= super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors def _check_backend(self): backend =", "being forced to update. 
if not _file and not force:", "ImageField, ImageFileDescriptor) from django.utils.translation import ugettext as _ from .backends", "Nothing to update if we have no file and not", "and `height` super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs) if not self.duration_field:", "= VideoFieldFile descriptor_class = VideoFileDescriptor description = _(\"Video\") def __init__(self,", "`height` super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs) if not self.duration_field: return", "= getattr(instance, self.attname) # we need a real file if", "the video info cache if hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile,", "class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True): #", "*args, **kwargs): _file = getattr(instance, self.attname) # we need a", "self).to_python(data) def update_dimension_fields(self, instance, force=False, *args, **kwargs): _file = getattr(instance,", "# Nothing to update if we have no file and", "if getattr(instance, self.duration_field) and not force: return # get duration", "if hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class", "VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True):", "= _file.duration if _file else None # update duration setattr(instance,", "FieldFile): def delete(self, save=True): # Clear the video info cache", "not being forced to update. if not _file and not", "duration = _file.duration if _file else None # update duration", "force=False, *args, **kwargs): _file = getattr(instance, self.attname) # we need", "file and not being forced to update. 
if not _file", "file is defined duration = _file.duration if _file else None", "return if getattr(instance, self.duration_field) and not force: return # get", "write `width` and `height` super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs) if", "class VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True): # Clear the video", "use FileField method return super(ImageField, self).to_python(data) def update_dimension_fields(self, instance, force=False,", "ugettext as _ from .backends import get_backend_class from .files import", "import get_backend_class from .files import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class", "def update_dimension_fields(self, instance, force=False, *args, **kwargs): _file = getattr(instance, self.attname)", "a real file if not _file._committed: return # write `width`", "from django.db.models.fields.files import (FieldFile, ImageField, ImageFileDescriptor) from django.utils.translation import ugettext", "super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs) if not self.duration_field: return #", "**kwargs): # use normal FileFieldWidget for now return super(ImageField, self).formfield(**kwargs)", "duration_field=None, **kwargs): self.duration_field = duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs) def", "self.attname) # we need a real file if not _file._committed:", "return # get duration if file is defined duration =", "attr_class = VideoFieldFile descriptor_class = VideoFileDescriptor description = _(\"Video\") def", "description = _(\"Video\") def __init__(self, verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field", "not _file and not force: return if getattr(instance, self.duration_field) and", "self.duration_field) and not force: return # get duration if file", "**kwargs): errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors def _check_backend(self):", "ImageFileDescriptor) from django.utils.translation import ugettext as _ from .backends import", "errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors def _check_backend(self): backend", ".backends import get_backend_class from .files import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass", "getattr(instance, self.duration_field) and not force: return # get duration if", "= get_backend_class() return backend.check() def to_python(self, data): # use FileField", "self.duration_field: return # Nothing to update if we have no", "need a real file if not _file._committed: return # write", "_file._committed: return # write `width` and `height` super(VideoField, self).update_dimension_fields(instance, force,", "formfield(self, **kwargs): # use normal FileFieldWidget for now return super(ImageField,", "getattr(instance, self.attname) # we need a real file if not", "# get duration if file is defined duration = _file.duration", "self.duration_field = duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs) def check(self, **kwargs):", "force, *args, **kwargs) if not self.duration_field: return # Nothing to", "# use FileField method return super(ImageField, self).to_python(data) def update_dimension_fields(self, instance,", "VideoField(ImageField): attr_class = VideoFieldFile descriptor_class = VideoFileDescriptor description = _(\"Video\")", "self).__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super(ImageField, 
self).check(**kwargs)", "backend = get_backend_class() return backend.check() def to_python(self, data): # use", "django.db.models.fields.files import (FieldFile, ImageField, ImageFileDescriptor) from django.utils.translation import ugettext as", "get_backend_class() return backend.check() def to_python(self, data): # use FileField method", "= _(\"Video\") def __init__(self, verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field =", "'_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class = VideoFieldFile", "**kwargs): self.duration_field = duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs) def check(self,", "forced to update. if not _file and not force: return", "we have no file and not being forced to update.", "import (FieldFile, ImageField, ImageFileDescriptor) from django.utils.translation import ugettext as _", "if not _file and not force: return if getattr(instance, self.duration_field)", "cache if hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField):", "no file and not being forced to update. if not", "else None # update duration setattr(instance, self.duration_field, duration) def formfield(self,", "_check_backend(self): backend = get_backend_class() return backend.check() def to_python(self, data): #", "video info cache if hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save)", "descriptor_class = VideoFileDescriptor description = _(\"Video\") def __init__(self, verbose_name=None, name=None,", "if file is defined duration = _file.duration if _file else", "def to_python(self, data): # use FileField method return super(ImageField, self).to_python(data)", "# Clear the video info cache if hasattr(self, '_info_cache'): del", "info cache if hasattr(self, '_info_cache'): del self._info_cache super(VideoFieldFile, self).delete(save=save) class", "instance, force=False, *args, **kwargs): _file = getattr(instance, self.attname) # we", "_file else None # update duration setattr(instance, self.duration_field, duration) def", "delete(self, save=True): # Clear the video info cache if hasattr(self,", "return super(ImageField, self).to_python(data) def update_dimension_fields(self, instance, force=False, *args, **kwargs): _file", "name=None, duration_field=None, **kwargs): self.duration_field = duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs)", "get_backend_class from .files import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile,", "if not _file._committed: return # write `width` and `height` super(VideoField,", "django.utils.translation import ugettext as _ from .backends import get_backend_class from", "method return super(ImageField, self).to_python(data) def update_dimension_fields(self, instance, force=False, *args, **kwargs):", "we need a real file if not _file._committed: return #", "_(\"Video\") def __init__(self, verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field = duration_field", "update. 
if not _file and not force: return if getattr(instance,", "not self.duration_field: return # Nothing to update if we have", "from .files import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile):", "self).delete(save=save) class VideoField(ImageField): attr_class = VideoFieldFile descriptor_class = VideoFileDescriptor description", "return # Nothing to update if we have no file", "backend.check() def to_python(self, data): # use FileField method return super(ImageField,", "not _file._committed: return # write `width` and `height` super(VideoField, self).update_dimension_fields(instance,", "update if we have no file and not being forced", "and not being forced to update. if not _file and", "update duration setattr(instance, self.duration_field, duration) def formfield(self, **kwargs): # use", "from django.utils.translation import ugettext as _ from .backends import get_backend_class", "not force: return # get duration if file is defined", "check(self, **kwargs): errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors def", "def check(self, **kwargs): errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return errors", "if _file else None # update duration setattr(instance, self.duration_field, duration)", "FileField method return super(ImageField, self).to_python(data) def update_dimension_fields(self, instance, force=False, *args,", "if we have no file and not being forced to", "duration setattr(instance, self.duration_field, duration) def formfield(self, **kwargs): # use normal", "have no file and not being forced to update. if", "**kwargs) def check(self, **kwargs): errors = super(ImageField, self).check(**kwargs) errors.extend(self._check_backend()) return", "import ugettext as _ from .backends import get_backend_class from .files", "**kwargs): _file = getattr(instance, self.attname) # we need a real", "(FieldFile, ImageField, ImageFileDescriptor) from django.utils.translation import ugettext as _ from", "and not force: return # get duration if file is", "super(VideoField, self).__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super(ImageField,", "real file if not _file._committed: return # write `width` and", "to update. 
if not _file and not force: return if", "defined duration = _file.duration if _file else None # update", "duration if file is defined duration = _file.duration if _file", "def delete(self, save=True): # Clear the video info cache if", "duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors =", "None # update duration setattr(instance, self.duration_field, duration) def formfield(self, **kwargs):", "del self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class = VideoFieldFile descriptor_class", ".files import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile): def", "VideoFieldFile descriptor_class = VideoFileDescriptor description = _(\"Video\") def __init__(self, verbose_name=None,", "= VideoFileDescriptor description = _(\"Video\") def __init__(self, verbose_name=None, name=None, duration_field=None,", "data): # use FileField method return super(ImageField, self).to_python(data) def update_dimension_fields(self,", "_file.duration if _file else None # update duration setattr(instance, self.duration_field,", "_file and not force: return if getattr(instance, self.duration_field) and not", "**kwargs) if not self.duration_field: return # Nothing to update if", "to update if we have no file and not being", "verbose_name=None, name=None, duration_field=None, **kwargs): self.duration_field = duration_field super(VideoField, self).__init__(verbose_name, name,", "update_dimension_fields(self, instance, force=False, *args, **kwargs): _file = getattr(instance, self.attname) #", "Clear the video info cache if hasattr(self, '_info_cache'): del self._info_cache", "from .backends import get_backend_class from .files import VideoFile class VideoFileDescriptor(ImageFileDescriptor):", "and not force: return if getattr(instance, self.duration_field) and not force:", "as _ from .backends import get_backend_class from .files import VideoFile", "= duration_field super(VideoField, self).__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors", "errors.extend(self._check_backend()) return errors def _check_backend(self): backend = get_backend_class() return backend.check()", "VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile): def delete(self, save=True): # Clear", "save=True): # Clear the video info cache if hasattr(self, '_info_cache'):", "is defined duration = _file.duration if _file else None #", "force: return if getattr(instance, self.duration_field) and not force: return #", "import VideoFile class VideoFileDescriptor(ImageFileDescriptor): pass class VideoFieldFile(VideoFile, FieldFile): def delete(self,", "get duration if file is defined duration = _file.duration if", "self._info_cache super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class = VideoFieldFile descriptor_class =", "self).update_dimension_fields(instance, force, *args, **kwargs) if not self.duration_field: return # Nothing", "VideoFileDescriptor description = _(\"Video\") def __init__(self, verbose_name=None, name=None, duration_field=None, **kwargs):", "def _check_backend(self): backend = get_backend_class() return backend.check() def to_python(self, data):", "`width` and `height` super(VideoField, self).update_dimension_fields(instance, force, *args, **kwargs) if not", "class VideoField(ImageField): attr_class = VideoFieldFile descriptor_class = VideoFileDescriptor description =", 
"super(VideoFieldFile, self).delete(save=save) class VideoField(ImageField): attr_class = VideoFieldFile descriptor_class = VideoFileDescriptor", "return # write `width` and `height` super(VideoField, self).update_dimension_fields(instance, force, *args," ]
[ "right=None): self.val = val self.left = left self.right = right", "len(array) if n == 0: return None m = n//2", "if n == 0: return None m = n//2 left,root,right", "array2BST(array): ''' array:sorted array ''' n = len(array) if n", "''' array:sorted array ''' n = len(array) if n ==", "n == 0: return None m = n//2 left,root,right =", "__init__(self, val=0, left=None, right=None): self.val = val self.left = left", "val=0, left=None, right=None): self.val = val self.left = left self.right", "= len(array) if n == 0: return None m =", "= right @staticmethod def array2BST(array): ''' array:sorted array ''' n", "0: return None m = n//2 left,root,right = array[:m],array[m],array[m+1:] return", "BST2array(node): ''' node:BST node ''' if not node: return []", "= val self.left = left self.right = right @staticmethod def", "BST: def __init__(self, val=0, left=None, right=None): self.val = val self.left", "self.right = right @staticmethod def array2BST(array): ''' array:sorted array '''", "array:sorted array ''' n = len(array) if n == 0:", "== 0: return None m = n//2 left,root,right = array[:m],array[m],array[m+1:]", "= array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): ''' node:BST node", "val self.left = left self.right = right @staticmethod def array2BST(array):", "left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): ''' node:BST", "<filename>BST.py class BST: def __init__(self, val=0, left=None, right=None): self.val =", "''' n = len(array) if n == 0: return None", "def array2BST(array): ''' array:sorted array ''' n = len(array) if", "@staticmethod def BST2array(node): ''' node:BST node ''' if not node:", "left=None, right=None): self.val = val self.left = left self.right =", "return None m = n//2 left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right))", "None m = n//2 left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod", "right @staticmethod def array2BST(array): ''' array:sorted array ''' n =", "n = len(array) if n == 0: return None m", "def BST2array(node): ''' node:BST node ''' if not node: return", "array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): ''' node:BST node '''", "self.left = left self.right = right @staticmethod def array2BST(array): '''", "n//2 left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): '''", "= left self.right = right @staticmethod def array2BST(array): ''' array:sorted", "BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): ''' node:BST node ''' if not", "class BST: def __init__(self, val=0, left=None, right=None): self.val = val", "array ''' n = len(array) if n == 0: return", "return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node): ''' node:BST node ''' if", "left self.right = right @staticmethod def array2BST(array): ''' array:sorted array", "def __init__(self, val=0, left=None, right=None): self.val = val self.left =", "= n//2 left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def BST2array(node):", "''' node:BST node ''' if not node: return [] return", "self.val = val self.left = left 
self.right = right @staticmethod", "m = n//2 left,root,right = array[:m],array[m],array[m+1:] return BST(root,BST.array2BST(left),BST.array2BST(right)) @staticmethod def", "@staticmethod def array2BST(array): ''' array:sorted array ''' n = len(array)", "node:BST node ''' if not node: return [] return BST.BST2array(node.left)+[node.val]+BST.BST2array(node.right)" ]
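A quick sanity check for the helpers above (not from the original file): converting a sorted list to a balanced BST and flattening it back should reproduce the input, with the middle element as the root.

if __name__ == '__main__':
    values = [1, 2, 3, 4, 5, 6, 7]
    root = BST.array2BST(values)
    assert BST.BST2array(root) == values
    print(root.val)  # 4: the middle element becomes the root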
[ "]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'),", "'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json',", "all tests in test_spec def run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file,", "[ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ])", "slm_lab.spec import spec_util import os import pandas as pd import", "'random_pendulum'), # ('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file,", "def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json',", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json', 'multi_env'), ]) def", "import spec_util import os import pandas as pd import pytest", "'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json',", "'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ])", "'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ])", "'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "[ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), #", "('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'),", "def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json',", "test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'),", "('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "('experimental/sil.json', 
'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name',", "to run all tests in test_spec def run_trial_test(spec_file, spec_name=False): spec", "def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json',", "trial = Trial(spec, info_space) trial_data = trial.run() assert isinstance(trial_data, pd.DataFrame)", "trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'),", "('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name):", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'),", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'),", "('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), #", "def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json',", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json', 'multi_env'), ])", "('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ])", "('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name): run_trial_test(spec_file,", "'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file,", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json',", "test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'),", "]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ 
('experimental/ppo_sil.json',", "os import pandas as pd import pytest import sys #", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), #", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json',", "'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name):", "# ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'),", "# ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky", "tests in test_spec def run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file, spec_name)", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'),", "'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file,", "flaky from slm_lab.experiment.control import Trial from slm_lab.experiment.monitor import InfoSpace from", "('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def", "@flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ]) def", "'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ])", "('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json',", "def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'),", "'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file,", "('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 
'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'),", "('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file,", "from slm_lab.experiment.monitor import InfoSpace from slm_lab.lib import util from slm_lab.spec", "run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json',", "import util from slm_lab.spec import spec_util import os import pandas", "spec_name) spec = spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial') trial =", "test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'),", "'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file,", "('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name): run_trial_test(spec_file,", "('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json',", "]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'),", "'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ])", "def run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec)", "= trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json',", "# ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name):", "'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file,", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json',", 
"test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'),", "@pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'),", "('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json',", "info_space.tick('trial') trial = Trial(spec, info_space) trial_data = trial.run() assert isinstance(trial_data,", "'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file,", "]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'),", "('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "Trial from slm_lab.experiment.monitor import InfoSpace from slm_lab.lib import util from", "= InfoSpace() info_space.tick('trial') trial = Trial(spec, info_space) trial_data = trial.run()", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json', 'multi_env'), ]) def test_base_multi(spec_file,", "('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), #", "('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), #", "'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json',", "def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json',", "'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json',", "'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def test_sil(spec_file,", "('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file,", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ 
('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file,", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json',", "('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def", "('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file,", "as pd import pytest import sys # helper method to", "('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file,", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json',", "import pandas as pd import pytest import sys # helper", "Trial(spec, info_space) trial_data = trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [", "'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json',", "method to run all tests in test_spec def run_trial_test(spec_file, spec_name=False):", "spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial') trial = Trial(spec, info_space) trial_data", "('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'),", "sys # helper method to run all tests in test_spec", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file, spec_name):", "test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'),", "import pytest import sys # helper method to run all", "# ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "= Trial(spec, info_space) trial_data = trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name',", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'),", "('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) 
@pytest.mark.parametrize('spec_file,spec_name', [", "def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json',", "spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial') trial", "slm_lab.experiment.monitor import InfoSpace from slm_lab.lib import util from slm_lab.spec import", "run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'),", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'),", "('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'),", "test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'),", "'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json',", "[ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file,", "('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name',", "]) def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'),", "('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'),", "('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def", "[ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), #", "('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file,", "('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) 
@pytest.mark.parametrize('spec_file,spec_name', [", "[ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), #", "test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json',", "('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file,", "'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'),", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "spec_name=False): spec = spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec) info_space =", "'a2c_pong'), ]) def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json',", "('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'),", "'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json',", "spec = spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial') trial = Trial(spec,", "def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'),", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'),", "assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), #", "('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 
'sarsa_conv_boltzmann_breakout'), #", "('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def", "slm_lab.experiment.control import Trial from slm_lab.experiment.monitor import InfoSpace from slm_lab.lib import", "# ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name):", "'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json',", "('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file,", "'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name):", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'),", "]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'),", "test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'),", "'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json',", "from slm_lab.spec import spec_util import os import pandas as pd", "('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file,", "'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json',", "]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'),", "('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name',", "'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'),", "'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name): 
run_trial_test(spec_file,", "'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ])", "]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'),", "flaky import flaky from slm_lab.experiment.control import Trial from slm_lab.experiment.monitor import", "[ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ])", "('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'),", "def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json',", "= spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial') trial = Trial(spec, info_space)", "def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ])", "('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json',", "pytest import sys # helper method to run all tests", "InfoSpace from slm_lab.lib import util from slm_lab.spec import spec_util import", "'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name):", "'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json',", "'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'),", "pd import pytest import sys # helper method to run", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json',", 
"'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name):", "test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json',", "# ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def", "'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json',", "'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'),", "('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json',", "'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'),", "'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file,", "'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [", "]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'),", "spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json',", "test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json',", "import sys # helper method to run all tests in", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'),", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json',", "spec_name) 
@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json',", "]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json',", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json',", "('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'),", "[ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ])", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'),", "'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'),", "('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, spec_name):", "[ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ])", "('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json',", "'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json',", "'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), #", "util from slm_lab.spec import spec_util import os import pandas as", "# ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name):", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'),", "[ 
('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ])", "('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json',", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'),", "from slm_lab.experiment.control import Trial from slm_lab.experiment.monitor import InfoSpace from slm_lab.lib", "# ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'),", "from slm_lab.lib import util from slm_lab.spec import spec_util import os", "slm_lab.lib import util from slm_lab.spec import spec_util import os import", "('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'), # ('base.json',", "('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name',", "test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'),", "test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json',", "spec = spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec) info_space = InfoSpace()", "# ('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file,", "def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'),", "spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'),", 
"('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'),", "# ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), #", "'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file,", "]) def test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json',", "]) def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'),", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json',", "= spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial')", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json',", "'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [", "[ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def", "[ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def", "('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'),", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ])", "('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "[ ('base.json', 
'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), #", "'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json',", "'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file,", "'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file,", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'),", "test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'),", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json',", "('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json',", "spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'),", "('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file,", "test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def", "('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json', 'multi_env'),", "]) def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'),", "('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name',", "<gh_stars>1-10 from flaky import flaky from slm_lab.experiment.control import Trial from", "'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, 
spec_name): run_trial_test(spec_file,", "'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky", "'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ])", "# ('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def", "'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json',", "# ('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name):", "run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json',", "spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'),", "test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'),", "[ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), #", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file,", "@pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json', 'multi_env'), ]) def test_base_multi(spec_file, spec_name):", "isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json',", "[ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "@flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json',", "'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'),", "'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 
'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ])", "# ('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ])", "]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json',", "'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'),", "[ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json',", "]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'),", "('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'),", "trial_data = trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'),", "'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json',", "'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky", "('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def", "import Trial from slm_lab.experiment.monitor import InfoSpace from slm_lab.lib import util", "@flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'),", "('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name):", "('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), 
('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'), #", "'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "info_space) trial_data = trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json',", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'),", "@flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json',", "('base.json', 'multi_body'), ('base.json', 'multi_env'), ]) def test_base_multi(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ])", "pandas as pd import pytest import sys # helper method", "'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json',", "run all tests in test_spec def run_trial_test(spec_file, spec_name=False): spec =", "def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json',", "('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name):", "('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name):", "spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'),", "'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "# ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky", "('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), 
('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json',", "('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ]) def", "('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file,", "def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'),", "('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name):", "('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json',", "'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file,", "'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ])", "run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json',", "# helper method to run all tests in test_spec def", "'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json',", "'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [", "('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file, spec_name): run_trial_test(spec_file,", "# ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file,", "# ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "[ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), #", "('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'),", "run_trial_test(spec_file, spec_name) @flaky 
@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json',", "('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'), #", "'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json',", "run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def", "[ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json',", "'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "def test_ppo_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'),", "test_spec def run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file, spec_name) spec =", "]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'),", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file,", "# ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file,", "'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json',", "('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'),", "info_space = InfoSpace() info_space.tick('trial') trial = Trial(spec, info_space) trial_data =", "('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'),", "@flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json',", "]) def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json',", "'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [", "('experimental/reinforce.json', 
'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json',", "spec_util import os import pandas as pd import pytest import", "test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'),", "@pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'),", "# ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name):", "def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json',", "import os import pandas as pd import pytest import sys", "('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), #", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), #", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def test_reinforce_vizdoom(spec_file,", "'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'),", "test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), #", "'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name',", "# ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name):", "run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec) info_space", "spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'),", "spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), 
('experimental/sil.json', 'sil_mlp_separate_pendulum'),", "]) def test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'),", "from flaky import flaky from slm_lab.experiment.control import Trial from slm_lab.experiment.monitor", "helper method to run all tests in test_spec def run_trial_test(spec_file,", "('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def", "import InfoSpace from slm_lab.lib import util from slm_lab.spec import spec_util", "'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky", "('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json',", "InfoSpace() info_space.tick('trial') trial = Trial(spec, info_space) trial_data = trial.run() assert", "('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [", "'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'),", "('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def", "'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name)", "spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json',", "in test_spec def run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file, spec_name) spec", "[ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file, spec_name):", "[ ('base.json', 'multi_body'), ('base.json', 'multi_env'), ]) def test_base_multi(spec_file, spec_name): run_trial_test(spec_file,", "spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'),", "import flaky from slm_lab.experiment.control import Trial from slm_lab.experiment.monitor import InfoSpace" ]
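The spec tests above all funnel into the single run_trial_test helper: pytest.mark.parametrize fans it out over (spec_file, spec_name) pairs, and @flaky retries the less deterministic cases. Below is a minimal, self-contained sketch of that same parametrize-plus-shared-helper pattern; the SPECS dict, load_spec, and run_spec_smoke_test are illustrative stand-ins, not SLM-Lab APIs. A subset of such tests can be selected the usual pytest way, e.g. pytest -k test_dqn.

import pytest

# Illustrative stand-ins; these are not SLM-Lab functions or spec names.
SPECS = {
    'demo_mlp_cartpole': {'algorithm': 'demo_mlp', 'env': 'CartPole-v0'},
    'demo_rnn_cartpole': {'algorithm': 'demo_rnn', 'env': 'CartPole-v0'},
}


def load_spec(spec_name):
    # stand-in for spec_util.get(spec_file, spec_name)
    return SPECS[spec_name]


def run_spec_smoke_test(spec_name):
    # shared helper, mirroring run_trial_test(): every parametrized case
    # exercises the same code path and ends in a simple assertion
    spec = load_spec(spec_name)
    assert spec['env'] == 'CartPole-v0'


@pytest.mark.parametrize('spec_name', sorted(SPECS))
def test_demo_specs(spec_name):
    run_spec_smoke_test(spec_name)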
[ "app.group.get_group_list() assert len(old_groups) == len(new_groups) def test_modify_group_header(app): if app.group.count() ==", "len(new_groups) def test_modify_group_header(app): if app.group.count() == 0: app.group.create(Group(header=\"test\")) old_groups =", "def test_modify_group_name(app): if app.group.count() == 0: app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list()", "= app.group.get_group_list() assert len(old_groups) == len(new_groups) def test_modify_group_header(app): if app.group.count()", "0: app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups = app.group.get_group_list()", "old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups = app.group.get_group_list() assert len(old_groups)", "app.group.count() == 0: app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups", "app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups = app.group.get_group_list() assert", "new_groups = app.group.get_group_list() assert len(old_groups) == len(new_groups) def test_modify_group_header(app): if", "= app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups = app.group.get_group_list() assert len(old_groups) ==", "== len(new_groups) def test_modify_group_header(app): if app.group.count() == 0: app.group.create(Group(header=\"test\")) old_groups", "model.group import Group def test_modify_group_name(app): if app.group.count() == 0: app.group.create(Group(name=\"test\"))", "= app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups = app.group.get_group_list() assert len(old_groups) ==", "app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups = app.group.get_group_list() assert len(old_groups) == len(new_groups)", "group\")) new_groups = app.group.get_group_list() assert len(old_groups) == len(new_groups) def test_modify_group_header(app):", "assert len(old_groups) == len(new_groups) def test_modify_group_header(app): if app.group.count() == 0:", "== 0: app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups =", "test_modify_group_header(app): if app.group.count() == 0: app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New", "import Group def test_modify_group_name(app): if app.group.count() == 0: app.group.create(Group(name=\"test\")) old_groups", "== 0: app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups =", "len(old_groups) == len(new_groups) def test_modify_group_header(app): if app.group.count() == 0: app.group.create(Group(header=\"test\"))", "from model.group import Group def test_modify_group_name(app): if app.group.count() == 0:", "app.group.modify_first_group(Group(name=\"New group\")) new_groups = app.group.get_group_list() assert len(old_groups) == len(new_groups) def", "test_modify_group_name(app): if app.group.count() == 0: app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New", 
"if app.group.count() == 0: app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\"))", "app.group.count() == 0: app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups", "app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups = app.group.get_group_list() assert len(old_groups) == len(new_groups)", "old_groups = app.group.get_group_list() app.group.modify_first_group(Group(header=\"New header\")) new_groups = app.group.get_group_list() assert len(old_groups)", "0: app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups = app.group.get_group_list()", "def test_modify_group_header(app): if app.group.count() == 0: app.group.create(Group(header=\"test\")) old_groups = app.group.get_group_list()", "if app.group.count() == 0: app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\"))", "Group def test_modify_group_name(app): if app.group.count() == 0: app.group.create(Group(name=\"test\")) old_groups =", "app.group.create(Group(name=\"test\")) old_groups = app.group.get_group_list() app.group.modify_first_group(Group(name=\"New group\")) new_groups = app.group.get_group_list() assert" ]
[ "WSGI spec. # # The data can only be read", "queued (and processed) by ReadMe if allowed self.metrics_core.process(req, res) yield", "be passed into the currently running WSGI web server. Args:", "import Metrics from readme_metrics.MetricsApiConfig import MetricsApiConfig from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper", "ReadMe Metrics Attributes: config (MetricsApiConfig): Contains the configuration settings for", "write try: req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time() * 1000)", "req.rm_content_length = content_length req.rm_body = content_body iterable = self.app(environ, _start_response)", "4 lines are a workaround for a serious shortcoming in", "iterable = self.app(environ, _start_response) for data in iterable: res_ctype =", "may be empty or missing try: content_length = int(environ.get(\"CONTENT_LENGTH\", 0))", "used by other code down the # pipeline. # #", "req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time() * 1000) if req.method", "Request class MetricsMiddleware: \"\"\"Core middleware class for ReadMe Metrics Attributes:", "\"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length = content_length req.rm_body =", "time import datetime from readme_metrics.Metrics import Metrics from readme_metrics.MetricsApiConfig import", "serious shortcoming in the # WSGI spec. # # The", "1000) if req.method == \"POST\": # The next 4 lines", "body res = ResponseInfoWrapper( response_headers, response_status, res_ctype, res_clength, data.decode(\"utf-8\"), )", "(h for h in response_headers if h[0] == \"Content-Length\"), None", "None req = Request(environ) def _start_response(_status, _response_headers, *args): write =", "down the # pipeline. # # For more info: https://stackoverflow.com/a/13106009/643951", "= str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time() * 1000) if req.method ==", "htype and hlength: res_ctype = htype[1] res_clength = int(hlength[1]) #", "_status return write try: req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time()", "For more info: https://stackoverflow.com/a/13106009/643951 # the environment variable CONTENT_LENGTH may", "== \"POST\": # The next 4 lines are a workaround", "config (MetricsApiConfig): Instance of MetricsApiConfig object \"\"\" self.config = config", "class MetricsMiddleware: \"\"\"Core middleware class for ReadMe Metrics Attributes: config", "response_headers if h[0] == \"Content-Type\"), None ) hlength = next(", "self.app = wsgi_app_reference self.metrics_core = Metrics(config) def __call__(self, environ, start_response):", "from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from werkzeug import Request class MetricsMiddleware:", "([type]): Reference to the current WSGI application, which will be", "== \"Content-Length\"), None ) if htype and hlength: res_ctype =", "response_headers, response_status response_headers = _response_headers response_status = _status return write", "response_status, res_ctype, res_clength, data.decode(\"utf-8\"), ) # Send off data to", "res_clength = int(hlength[1]) # Populate response body res = ResponseInfoWrapper(", ") hlength = next( (h for h in response_headers if", "# Undocumented in WSGI spec but the iterable has to", "htype = next( (h for h in response_headers if h[0]", "NOT be calling this method yourself under normal circumstances. 
\"\"\"", "environ[\"wsgi.input\"].read(content_length) # guarding check to close stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"):", "normal circumstances. \"\"\" response_headers = {} response_status = 0 iterable", "= {} response_status = 0 iterable = None req =", "for ReadMe Metrics Attributes: config (MetricsApiConfig): Contains the configuration settings", "yield data finally: # Undocumented in WSGI spec but the", "# pipeline. # # For more info: https://stackoverflow.com/a/13106009/643951 # the", "of MetricsApiConfig object \"\"\" self.config = config self.app = wsgi_app_reference", "= int(time.time() * 1000) if req.method == \"POST\": # The", "req = Request(environ) def _start_response(_status, _response_headers, *args): write = start_response(_status,", "a serious shortcoming in the # WSGI spec. # #", "to be queued (and processed) by ReadMe if allowed self.metrics_core.process(req,", "pipeline. # # For more info: https://stackoverflow.com/a/13106009/643951 # the environment", "and cannot be read again. As such, we read the", "h in response_headers if h[0] == \"Content-Type\"), None ) hlength", "by the running WSGI server. You should NOT be calling", "= next( (h for h in response_headers if h[0] ==", "= content_body iterable = self.app(environ, _start_response) for data in iterable:", "import ResponseInfoWrapper from werkzeug import Request class MetricsMiddleware: \"\"\"Core middleware", "after which the socket is exhausted # and cannot be", "that is called by the running WSGI server. You should", "_response_headers response_status = _status return write try: req.rm_start_dt = str(datetime.datetime.utcnow())", "we read the data and then # repopulate the variable", "to the current WSGI application, which will be wrapped config", "__init__(self, wsgi_app_reference, config: MetricsApiConfig): \"\"\" Constructs and initializes MetricsMiddleware WSGI", "# and cannot be read again. As such, we read", "<gh_stars>1-10 import io import time import datetime from readme_metrics.Metrics import", "in iterable: res_ctype = \"\" res_clength = 0 htype =", "the iterable has to be closed if hasattr(iterable, \"close\"): iterable.close()", "environment variable CONTENT_LENGTH may be empty or missing try: content_length", "config (MetricsApiConfig): Contains the configuration settings for the running middleware", "readme_metrics.MetricsApiConfig import MetricsApiConfig from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from werkzeug import", "Instance of MetricsApiConfig object \"\"\" self.config = config self.app =", "if h[0] == \"Content-Length\"), None ) if htype and hlength:", "guarding check to close stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"]", "calling this method yourself under normal circumstances. \"\"\" response_headers =", "will be wrapped config (MetricsApiConfig): Instance of MetricsApiConfig object \"\"\"", "close stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length", "empty or missing try: content_length = int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError):", "start_response): \"\"\"Method that is called by the running WSGI server.", "nonlocal response_headers, response_status response_headers = _response_headers response_status = _status return", "which will be wrapped config (MetricsApiConfig): Instance of MetricsApiConfig object", "exhausted # and cannot be read again. 
As such, we", "so that it can be used by other code down", "be used by other code down the # pipeline. #", "if h[0] == \"Content-Type\"), None ) hlength = next( (h", "# WSGI spec. # # The data can only be", "res_ctype = htype[1] res_clength = int(hlength[1]) # Populate response body", "\"\"\" Constructs and initializes MetricsMiddleware WSGI middleware to be passed", "middleware to be passed into the currently running WSGI web", "def _start_response(_status, _response_headers, *args): write = start_response(_status, _response_headers, *args) #", "= ResponseInfoWrapper( response_headers, response_status, res_ctype, res_clength, data.decode(\"utf-8\"), ) # Send", "passed into the currently running WSGI web server. Args: wsgi_app_reference", "_response_headers, *args): write = start_response(_status, _response_headers, *args) # Populate response", "in response_headers if h[0] == \"Content-Length\"), None ) if htype", "hlength: res_ctype = htype[1] res_clength = int(hlength[1]) # Populate response", "variable so that it can be used by other code", "config: MetricsApiConfig): \"\"\" Constructs and initializes MetricsMiddleware WSGI middleware to", "config self.app = wsgi_app_reference self.metrics_core = Metrics(config) def __call__(self, environ,", "spec but the iterable has to be closed if hasattr(iterable,", "response_headers if h[0] == \"Content-Length\"), None ) if htype and", "is called by the running WSGI server. You should NOT", "(ValueError): content_length = 0 content_body = environ[\"wsgi.input\"].read(content_length) # guarding check", "code down the # pipeline. # # For more info:", "Undocumented in WSGI spec but the iterable has to be", "be wrapped config (MetricsApiConfig): Instance of MetricsApiConfig object \"\"\" self.config", "server. Args: wsgi_app_reference ([type]): Reference to the current WSGI application,", "None ) if htype and hlength: res_ctype = htype[1] res_clength", "(and processed) by ReadMe if allowed self.metrics_core.process(req, res) yield data", "class for ReadMe Metrics Attributes: config (MetricsApiConfig): Contains the configuration", "if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length = content_length", "= io.BytesIO(content_body) req.rm_content_length = content_length req.rm_body = content_body iterable =", "MetricsApiConfig object \"\"\" self.config = config self.app = wsgi_app_reference self.metrics_core", ") # Send off data to be queued (and processed)", "currently running WSGI web server. Args: wsgi_app_reference ([type]): Reference to", "again. 
As such, we read the data and then #", "Metrics from readme_metrics.MetricsApiConfig import MetricsApiConfig from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from", "check to close stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] =", "be empty or missing try: content_length = int(environ.get(\"CONTENT_LENGTH\", 0)) except", "initializes MetricsMiddleware WSGI middleware to be passed into the currently", "data finally: # Undocumented in WSGI spec but the iterable", "= None req = Request(environ) def _start_response(_status, _response_headers, *args): write", "current WSGI application, which will be wrapped config (MetricsApiConfig): Instance", "The next 4 lines are a workaround for a serious", "h[0] == \"Content-Length\"), None ) if htype and hlength: res_ctype", "info (headers & status) nonlocal response_headers, response_status response_headers = _response_headers", ") if htype and hlength: res_ctype = htype[1] res_clength =", "response_status = _status return write try: req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts", "\"POST\": # The next 4 lines are a workaround for", "= environ[\"wsgi.input\"].read(content_length) # guarding check to close stream if hasattr(environ[\"CONTENT_LENGTH\"],", "be read once, after which the socket is exhausted #", "_response_headers, *args) # Populate response info (headers & status) nonlocal", "import io import time import datetime from readme_metrics.Metrics import Metrics", "return write try: req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time() *", "settings for the running middleware instance \"\"\" def __init__(self, wsgi_app_reference,", "running WSGI server. You should NOT be calling this method", "ResponseInfoWrapper from werkzeug import Request class MetricsMiddleware: \"\"\"Core middleware class", "response info (headers & status) nonlocal response_headers, response_status response_headers =", "the current WSGI application, which will be wrapped config (MetricsApiConfig):", "processed) by ReadMe if allowed self.metrics_core.process(req, res) yield data finally:", "= _response_headers response_status = _status return write try: req.rm_start_dt =", "environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length = content_length req.rm_body = content_body", "be queued (and processed) by ReadMe if allowed self.metrics_core.process(req, res)", "int(time.time() * 1000) if req.method == \"POST\": # The next", "# # For more info: https://stackoverflow.com/a/13106009/643951 # the environment variable", "allowed self.metrics_core.process(req, res) yield data finally: # Undocumented in WSGI", "by other code down the # pipeline. # # For", "but the iterable has to be closed if hasattr(iterable, \"close\"):", "object \"\"\" self.config = config self.app = wsgi_app_reference self.metrics_core =", "info: https://stackoverflow.com/a/13106009/643951 # the environment variable CONTENT_LENGTH may be empty", "= int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError): content_length = 0 content_body =", "= _status return write try: req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts =", "workaround for a serious shortcoming in the # WSGI spec.", "method yourself under normal circumstances. \"\"\" response_headers = {} response_status", "in WSGI spec but the iterable has to be closed", "repopulate the variable so that it can be used by", "other code down the # pipeline. 
# # For more", "str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time() * 1000) if req.method == \"POST\":", "h in response_headers if h[0] == \"Content-Length\"), None ) if", "try: req.rm_start_dt = str(datetime.datetime.utcnow()) req.rm_start_ts = int(time.time() * 1000) if", "# For more info: https://stackoverflow.com/a/13106009/643951 # the environment variable CONTENT_LENGTH", "to be passed into the currently running WSGI web server.", "# Send off data to be queued (and processed) by", "Attributes: config (MetricsApiConfig): Contains the configuration settings for the running", "wsgi_app_reference ([type]): Reference to the current WSGI application, which will", "import time import datetime from readme_metrics.Metrics import Metrics from readme_metrics.MetricsApiConfig", "(headers & status) nonlocal response_headers, response_status response_headers = _response_headers response_status", "res) yield data finally: # Undocumented in WSGI spec but", "are a workaround for a serious shortcoming in the #", "middleware instance \"\"\" def __init__(self, wsgi_app_reference, config: MetricsApiConfig): \"\"\" Constructs", "= 0 htype = next( (h for h in response_headers", "= 0 iterable = None req = Request(environ) def _start_response(_status,", "* 1000) if req.method == \"POST\": # The next 4", "for a serious shortcoming in the # WSGI spec. #", "content_body = environ[\"wsgi.input\"].read(content_length) # guarding check to close stream if", "data and then # repopulate the variable so that it", "data to be queued (and processed) by ReadMe if allowed", "import MetricsApiConfig from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from werkzeug import Request", "such, we read the data and then # repopulate the", "running WSGI web server. Args: wsgi_app_reference ([type]): Reference to the", "io.BytesIO(content_body) req.rm_content_length = content_length req.rm_body = content_body iterable = self.app(environ,", "= Metrics(config) def __call__(self, environ, start_response): \"\"\"Method that is called", "under normal circumstances. \"\"\" response_headers = {} response_status = 0", "\"\"\"Core middleware class for ReadMe Metrics Attributes: config (MetricsApiConfig): Contains", "MetricsMiddleware WSGI middleware to be passed into the currently running", "res_ctype, res_clength, data.decode(\"utf-8\"), ) # Send off data to be", "by ReadMe if allowed self.metrics_core.process(req, res) yield data finally: #", "Contains the configuration settings for the running middleware instance \"\"\"", "in the # WSGI spec. 
# # The data can", "*args) # Populate response info (headers & status) nonlocal response_headers,", "Metrics Attributes: config (MetricsApiConfig): Contains the configuration settings for the", "iterable = None req = Request(environ) def _start_response(_status, _response_headers, *args):", "read the data and then # repopulate the variable so", "it can be used by other code down the #", "to close stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body)", "\"\" res_clength = 0 htype = next( (h for h", "for h in response_headers if h[0] == \"Content-Length\"), None )", "next( (h for h in response_headers if h[0] == \"Content-Type\"),", "then # repopulate the variable so that it can be", "environ, start_response): \"\"\"Method that is called by the running WSGI", "instance \"\"\" def __init__(self, wsgi_app_reference, config: MetricsApiConfig): \"\"\" Constructs and", "# The data can only be read once, after which", "(MetricsApiConfig): Contains the configuration settings for the running middleware instance", "except (ValueError): content_length = 0 content_body = environ[\"wsgi.input\"].read(content_length) # guarding", "werkzeug import Request class MetricsMiddleware: \"\"\"Core middleware class for ReadMe", "next( (h for h in response_headers if h[0] == \"Content-Length\"),", "_start_response) for data in iterable: res_ctype = \"\" res_clength =", "iterable: res_ctype = \"\" res_clength = 0 htype = next(", "# The next 4 lines are a workaround for a", "response_status = 0 iterable = None req = Request(environ) def", "cannot be read again. As such, we read the data", "readme_metrics.Metrics import Metrics from readme_metrics.MetricsApiConfig import MetricsApiConfig from readme_metrics.ResponseInfoWrapper import", "read once, after which the socket is exhausted # and", "read again. As such, we read the data and then", "more info: https://stackoverflow.com/a/13106009/643951 # the environment variable CONTENT_LENGTH may be", "called by the running WSGI server. You should NOT be", "start_response(_status, _response_headers, *args) # Populate response info (headers & status)", "\"Content-Length\"), None ) if htype and hlength: res_ctype = htype[1]", "# Populate response info (headers & status) nonlocal response_headers, response_status", "and initializes MetricsMiddleware WSGI middleware to be passed into the", "== \"Content-Type\"), None ) hlength = next( (h for h", "which the socket is exhausted # and cannot be read", "be read again. As such, we read the data and", "for data in iterable: res_ctype = \"\" res_clength = 0", "def __init__(self, wsgi_app_reference, config: MetricsApiConfig): \"\"\" Constructs and initializes MetricsMiddleware", "circumstances. \"\"\" response_headers = {} response_status = 0 iterable =", "once, after which the socket is exhausted # and cannot", "the currently running WSGI web server. Args: wsgi_app_reference ([type]): Reference", "a workaround for a serious shortcoming in the # WSGI", "WSGI server. You should NOT be calling this method yourself", "# guarding check to close stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close()", "server. You should NOT be calling this method yourself under", "web server. 
Args: wsgi_app_reference ([type]): Reference to the current WSGI", "You should NOT be calling this method yourself under normal", "# repopulate the variable so that it can be used", "self.app(environ, _start_response) for data in iterable: res_ctype = \"\" res_clength", "Args: wsgi_app_reference ([type]): Reference to the current WSGI application, which", "data can only be read once, after which the socket", "readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from werkzeug import Request class MetricsMiddleware: \"\"\"Core", "res_clength = 0 htype = next( (h for h in", "response_status response_headers = _response_headers response_status = _status return write try:", "configuration settings for the running middleware instance \"\"\" def __init__(self,", "= self.app(environ, _start_response) for data in iterable: res_ctype = \"\"", "response body res = ResponseInfoWrapper( response_headers, response_status, res_ctype, res_clength, data.decode(\"utf-8\"),", "data in iterable: res_ctype = \"\" res_clength = 0 htype", "= start_response(_status, _response_headers, *args) # Populate response info (headers &", "self.metrics_core = Metrics(config) def __call__(self, environ, start_response): \"\"\"Method that is", "import datetime from readme_metrics.Metrics import Metrics from readme_metrics.MetricsApiConfig import MetricsApiConfig", "= \"\" res_clength = 0 htype = next( (h for", "middleware class for ReadMe Metrics Attributes: config (MetricsApiConfig): Contains the", "the socket is exhausted # and cannot be read again.", "= config self.app = wsgi_app_reference self.metrics_core = Metrics(config) def __call__(self,", "and then # repopulate the variable so that it can", "wrapped config (MetricsApiConfig): Instance of MetricsApiConfig object \"\"\" self.config =", "0 iterable = None req = Request(environ) def _start_response(_status, _response_headers,", "hlength = next( (h for h in response_headers if h[0]", "wsgi_app_reference, config: MetricsApiConfig): \"\"\" Constructs and initializes MetricsMiddleware WSGI middleware", "res_ctype = \"\" res_clength = 0 htype = next( (h", "htype[1] res_clength = int(hlength[1]) # Populate response body res =", "can be used by other code down the # pipeline.", "WSGI application, which will be wrapped config (MetricsApiConfig): Instance of", "only be read once, after which the socket is exhausted", "the configuration settings for the running middleware instance \"\"\" def", "if allowed self.metrics_core.process(req, res) yield data finally: # Undocumented in", "try: content_length = int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError): content_length = 0", "data.decode(\"utf-8\"), ) # Send off data to be queued (and", "can only be read once, after which the socket is", "& status) nonlocal response_headers, response_status response_headers = _response_headers response_status =", "CONTENT_LENGTH may be empty or missing try: content_length = int(environ.get(\"CONTENT_LENGTH\",", "MetricsApiConfig): \"\"\" Constructs and initializes MetricsMiddleware WSGI middleware to be", "0)) except (ValueError): content_length = 0 content_body = environ[\"wsgi.input\"].read(content_length) #", "req.rm_body = content_body iterable = self.app(environ, _start_response) for data in", "WSGI spec but the iterable has to be closed if", "(MetricsApiConfig): Instance of MetricsApiConfig object \"\"\" self.config = config self.app", "that it can be used by other code down the", "0 htype = next( (h for h in response_headers if", "Send off data to be queued (and 
processed) by ReadMe", "wsgi_app_reference self.metrics_core = Metrics(config) def __call__(self, environ, start_response): \"\"\"Method that", "self.config = config self.app = wsgi_app_reference self.metrics_core = Metrics(config) def", "or missing try: content_length = int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError): content_length", "and hlength: res_ctype = htype[1] res_clength = int(hlength[1]) # Populate", "The data can only be read once, after which the", "def __call__(self, environ, start_response): \"\"\"Method that is called by the", "this method yourself under normal circumstances. \"\"\" response_headers = {}", "the variable so that it can be used by other", "content_length req.rm_body = content_body iterable = self.app(environ, _start_response) for data", "the running WSGI server. You should NOT be calling this", "Populate response body res = ResponseInfoWrapper( response_headers, response_status, res_ctype, res_clength,", "= wsgi_app_reference self.metrics_core = Metrics(config) def __call__(self, environ, start_response): \"\"\"Method", "MetricsMiddleware: \"\"\"Core middleware class for ReadMe Metrics Attributes: config (MetricsApiConfig):", "if htype and hlength: res_ctype = htype[1] res_clength = int(hlength[1])", "# the environment variable CONTENT_LENGTH may be empty or missing", "h[0] == \"Content-Type\"), None ) hlength = next( (h for", "response_headers = _response_headers response_status = _status return write try: req.rm_start_dt", "next 4 lines are a workaround for a serious shortcoming", "if req.method == \"POST\": # The next 4 lines are", "content_length = 0 content_body = environ[\"wsgi.input\"].read(content_length) # guarding check to", "MetricsApiConfig from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from werkzeug import Request class", "environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length = content_length req.rm_body = content_body iterable", "res_clength, data.decode(\"utf-8\"), ) # Send off data to be queued", "for the running middleware instance \"\"\" def __init__(self, wsgi_app_reference, config:", "running middleware instance \"\"\" def __init__(self, wsgi_app_reference, config: MetricsApiConfig): \"\"\"", "shortcoming in the # WSGI spec. # # The data", "ResponseInfoWrapper( response_headers, response_status, res_ctype, res_clength, data.decode(\"utf-8\"), ) # Send off", "status) nonlocal response_headers, response_status response_headers = _response_headers response_status = _status", "variable CONTENT_LENGTH may be empty or missing try: content_length =", "Reference to the current WSGI application, which will be wrapped", "response_headers, response_status, res_ctype, res_clength, data.decode(\"utf-8\"), ) # Send off data", "ReadMe if allowed self.metrics_core.process(req, res) yield data finally: # Undocumented", "into the currently running WSGI web server. Args: wsgi_app_reference ([type]):", "the # pipeline. # # For more info: https://stackoverflow.com/a/13106009/643951 #", "from readme_metrics.Metrics import Metrics from readme_metrics.MetricsApiConfig import MetricsApiConfig from readme_metrics.ResponseInfoWrapper", "write = start_response(_status, _response_headers, *args) # Populate response info (headers", "finally: # Undocumented in WSGI spec but the iterable has", "the # WSGI spec. 
# # The data can only", "# Populate response body res = ResponseInfoWrapper( response_headers, response_status, res_ctype,", "\"\"\" self.config = config self.app = wsgi_app_reference self.metrics_core = Metrics(config)", "Request(environ) def _start_response(_status, _response_headers, *args): write = start_response(_status, _response_headers, *args)", "\"\"\" response_headers = {} response_status = 0 iterable = None", "is exhausted # and cannot be read again. As such,", "0 content_body = environ[\"wsgi.input\"].read(content_length) # guarding check to close stream", "WSGI middleware to be passed into the currently running WSGI", "stream if hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length =", "spec. # # The data can only be read once,", "in response_headers if h[0] == \"Content-Type\"), None ) hlength =", "__call__(self, environ, start_response): \"\"\"Method that is called by the running", "= htype[1] res_clength = int(hlength[1]) # Populate response body res", "should NOT be calling this method yourself under normal circumstances.", "from werkzeug import Request class MetricsMiddleware: \"\"\"Core middleware class for", "import Request class MetricsMiddleware: \"\"\"Core middleware class for ReadMe Metrics", "missing try: content_length = int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError): content_length =", "res = ResponseInfoWrapper( response_headers, response_status, res_ctype, res_clength, data.decode(\"utf-8\"), ) #", "\"\"\"Method that is called by the running WSGI server. You", "be calling this method yourself under normal circumstances. \"\"\" response_headers", "https://stackoverflow.com/a/13106009/643951 # the environment variable CONTENT_LENGTH may be empty or", "= int(hlength[1]) # Populate response body res = ResponseInfoWrapper( response_headers,", "content_body iterable = self.app(environ, _start_response) for data in iterable: res_ctype", "yourself under normal circumstances. \"\"\" response_headers = {} response_status =", "self.metrics_core.process(req, res) yield data finally: # Undocumented in WSGI spec", "WSGI web server. 
Args: wsgi_app_reference ([type]): Reference to the current", "_start_response(_status, _response_headers, *args): write = start_response(_status, _response_headers, *args) # Populate", "application, which will be wrapped config (MetricsApiConfig): Instance of MetricsApiConfig", "int(hlength[1]) # Populate response body res = ResponseInfoWrapper( response_headers, response_status,", "off data to be queued (and processed) by ReadMe if", "= content_length req.rm_body = content_body iterable = self.app(environ, _start_response) for", "the running middleware instance \"\"\" def __init__(self, wsgi_app_reference, config: MetricsApiConfig):", "the data and then # repopulate the variable so that", "content_length = int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError): content_length = 0 content_body", "Constructs and initializes MetricsMiddleware WSGI middleware to be passed into", "{} response_status = 0 iterable = None req = Request(environ)", "= Request(environ) def _start_response(_status, _response_headers, *args): write = start_response(_status, _response_headers,", "io import time import datetime from readme_metrics.Metrics import Metrics from", "*args): write = start_response(_status, _response_headers, *args) # Populate response info", "the environment variable CONTENT_LENGTH may be empty or missing try:", "# # The data can only be read once, after", "hasattr(environ[\"CONTENT_LENGTH\"], \"close\"): environ[\"wsgi.input\"].close() environ[\"wsgi.input\"] = io.BytesIO(content_body) req.rm_content_length = content_length req.rm_body", "Metrics(config) def __call__(self, environ, start_response): \"\"\"Method that is called by", "response_headers = {} response_status = 0 iterable = None req", "Populate response info (headers & status) nonlocal response_headers, response_status response_headers", "(h for h in response_headers if h[0] == \"Content-Type\"), None", "None ) hlength = next( (h for h in response_headers", "req.rm_start_ts = int(time.time() * 1000) if req.method == \"POST\": #", "for h in response_headers if h[0] == \"Content-Type\"), None )", "from readme_metrics.MetricsApiConfig import MetricsApiConfig from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper from werkzeug", "lines are a workaround for a serious shortcoming in the", "As such, we read the data and then # repopulate", "= 0 content_body = environ[\"wsgi.input\"].read(content_length) # guarding check to close", "int(environ.get(\"CONTENT_LENGTH\", 0)) except (ValueError): content_length = 0 content_body = environ[\"wsgi.input\"].read(content_length)", "socket is exhausted # and cannot be read again. As", "\"\"\" def __init__(self, wsgi_app_reference, config: MetricsApiConfig): \"\"\" Constructs and initializes", "req.method == \"POST\": # The next 4 lines are a", "datetime from readme_metrics.Metrics import Metrics from readme_metrics.MetricsApiConfig import MetricsApiConfig from", "\"Content-Type\"), None ) hlength = next( (h for h in" ]
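A minimal sketch of how this middleware is typically plugged in, wrapping an existing WSGI callable. The import path and the way a MetricsApiConfig instance is built are assumptions here (they are not shown in the middleware source above); consult the readme_metrics package documentation for the actual constructor arguments.

# Hypothetical usage sketch -- `config` must be a MetricsApiConfig instance
# constructed per the readme_metrics docs; it is passed in from outside so this
# sketch does not guess at its constructor signature.
from readme_metrics import MetricsMiddleware  # import path assumed


def simple_app(environ, start_response):
    # Trivial WSGI application used only for illustration.
    body = b"hello"
    start_response(
        "200 OK",
        [("Content-Type", "text/plain"), ("Content-Length", str(len(body)))],
    )
    return [body]


def wrap_with_metrics(wsgi_app, config):
    # MetricsMiddleware takes the app to wrap and the config, matching __init__ above.
    return MetricsMiddleware(wsgi_app, config)

# e.g. for a Flask project: flask_app.wsgi_app = wrap_with_metrics(flask_app.wsgi_app, config)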
[ "env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2) plt.xlabel(\"Episodes\") plt.ylabel(\"Cumulative Reward\") plt.title(\"Performance\") plt.show() plt.close()", "x,y in action_list.items()] sorted_list.sort(reverse=True) take_action = sorted_list[0][1] else: # training", "exploration_decay = 0.98 k = 500 # number of nearest", "parser = argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes', nargs='?', type=int, default=500) parser.add_argument('--max_timesteps',", "populated (atleast one episode has been run) # testing phase:", "value function for this episode val = 0 for t", "# first state observed states = np.zeros((max_state_size, observation.size)) if num_iter", "+ 1 # normalize by number of times action occured", "took rewards[num_iter-1] = reward # and the reward we obtained", "value for act in action_list: action_list[act] = action_list[act] / freq_list[act]", "gamma = 0.95 max_state_size = 15000 # because we don't", "and reward obtained if num_iter < max_state_size: states[num_iter] = observation", "phase: exploration randomly picks an action take_action = action_space.sample() #", "take_action = action_space.sample() # populate the state present, action taken", "decay exploration probability epsilon *= exploration_decay # do not decay", "indices = nbrs.kneighbors(observation) # find the best action action_list =", "j in range(args.max_timesteps): env.render() action = make_move(observation, reward, done) observation,", "to pick the action which has the highest reward nbrs", "values is populated (atleast one episode has been run) #", "np import gym from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as", "# number of nearest neighbors minimum_num_iters = 500 # number", "indices[0]: v = values[i] a = actions[i] vnew = action_list.get(a,", "of iterations used for training num_iter = 0 max_iters =", "sorted_list[0][1] else: # training phase: exploration randomly picks an action", "env.action_space # hyperparameters: epsilon = 1.0 exploration_decay = 0.98 k", "if num_iter > minimum_num_iters and np.random.rand() > epsilon and values:", "episode_beginning, max_iters, epsilon if states is None: # first state", "warn(*args, **kwargs): pass import warnings warnings.warn = warn reward =", "normalize by number of times action occured and take action", "with KNN') parser.add_argument('--episodes', nargs='?', type=int, default=500) parser.add_argument('--max_timesteps', nargs='?', type=int, default=200)", "num_iter, episode_beginning, max_iters, epsilon if states is None: # first", "v action_list[a] = vnew freq_list[a] = freq_list.get(a, 0) + 1", "size in continuous environments # learning-related variables states = None", "episode-related variables episode_beginning = 0 def make_move(observation, reward, done): global", "for i in indices[0]: v = values[i] a = actions[i]", "{} values = {} # episode-related variables episode_beginning = 0", "= action_space.sample() # populate the state present, action taken and", "values, rewards, num_iter, episode_beginning, max_iters, epsilon if states is None:", "1.0 exploration_decay = 0.98 k = 500 # number of", "+ rewards.get(t,0) values[t] = val episode_beginning = num_iter max_iters =", "num_iter max_iters = min(max(max_iters, num_iter), max_state_size) # decay exploration probability", "nbrs.kneighbors(observation) # find the best action action_list = {} freq_list", "action action_list = {} freq_list = {} for i in", "done) observation, reward, done, _ = env.step(action) 
sum_reward += reward", "actions[num_iter] = take_action # and the action we took rewards[num_iter-1]", "run) # testing phase: exploitation # Uses k=500 nearest neighbors", "[(y,x) for x,y in action_list.items()] sorted_list.sort(reverse=True) take_action = sorted_list[0][1] else:", "values[i] a = actions[i] vnew = action_list.get(a, 0) + v", "epsilon = 1.0 exploration_decay = 0.98 k = 500 #", "actions = {} rewards = {} values = {} #", "the value function for this episode val = 0 for", "num_iter > minimum_num_iters and np.random.rand() > epsilon and values: #", "minimum_num_iters = 500 # number of iterations used for training", "import numpy as np import gym from sklearn.neighbors import NearestNeighbors", "observation.size)) if num_iter > minimum_num_iters and np.random.rand() > epsilon and", "is None: # first state observed states = np.zeros((max_state_size, observation.size))", "0.95 + sum_reward * 0.05 print('Reward for episode '+ str(i)+'", "variables states = None actions = {} rewards = {}", "import gym from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt", "500 # number of nearest neighbors minimum_num_iters = 500 #", "last time step values[num_iter-1] = 0 num_iter += 1 if", "times action occured and take action with highest value for", "False cumulative_reward_list = [] for i in range(args.episodes): observation =", "import warnings warnings.warn = warn reward = 0 episode_reward =", "learning-related variables states = None actions = {} rewards =", "**kwargs): pass import warnings warnings.warn = warn reward = 0", "obtained last time step values[num_iter-1] = 0 num_iter += 1", "for i in range(args.episodes): observation = env.reset() sum_reward = 0", "rewards = {} values = {} # episode-related variables episode_beginning", "action_list: action_list[act] = action_list[act] / freq_list[act] sorted_list = [(y,x) for", "episode_beginning = num_iter max_iters = min(max(max_iters, num_iter), max_state_size) # decay", "import argparse parser = argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes', nargs='?', type=int,", "epsilon *= exploration_decay # do not decay below 0 epsilon", "a = actions[i] vnew = action_list.get(a, 0) + v action_list[a]", "= 0 def make_move(observation, reward, done): global states, actions, values,", "is populated (atleast one episode has been run) # testing", "= [] for i in range(args.episodes): observation = env.reset() sum_reward", "done: # end of episode: calculate the value function for", "freq_list[a] = freq_list.get(a, 0) + 1 # normalize by number", "= max(epsilon, 0) return take_action # Ignore sklearn warnings def", "numpy as np import gym from sklearn.neighbors import NearestNeighbors import", "# number of iterations used for training num_iter = 0", "# if amount of data is sufficient and values is", "# testing phase: exploitation # Uses k=500 nearest neighbors to", "1 if done: # end of episode: calculate the value", "if done: break episode_reward = episode_reward * 0.95 + sum_reward", "= episode_reward * 0.95 + sum_reward * 0.05 print('Reward for", "{} rewards = {} values = {} # episode-related variables", "NearestNeighbors import matplotlib.pyplot as plt import argparse parser = argparse.ArgumentParser(description='KBRL", "observation # save the state actions[num_iter] = take_action # and", "= take_action # and the action we took rewards[num_iter-1] =", "distances, indices = nbrs.kneighbors(observation) # find the best action action_list", "# hyperparameters: epsilon = 1.0 exploration_decay = 
0.98 k =", "step values[num_iter-1] = 0 num_iter += 1 if done: #", "actions[i] vnew = action_list.get(a, 0) + v action_list[a] = vnew", "v = values[i] a = actions[i] vnew = action_list.get(a, 0)", "warnings def warn(*args, **kwargs): pass import warnings warnings.warn = warn", "= 500 # number of nearest neighbors minimum_num_iters = 500", "val + rewards.get(t,0) values[t] = val episode_beginning = num_iter max_iters", "= parser.parse_args() env = gym.make(args.environment).env action_space = env.action_space # hyperparameters:", "= 0 gamma = 0.95 max_state_size = 15000 # because", "which has the highest reward nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices", "0.98 k = 500 # number of nearest neighbors minimum_num_iters", "t in reversed(range(episode_beginning, num_iter)): val = gamma * val +", "# populate the state present, action taken and reward obtained", "of nearest neighbors minimum_num_iters = 500 # number of iterations", "reward = 0 episode_reward = 0 done = False cumulative_reward_list", "= num_iter max_iters = min(max(max_iters, num_iter), max_state_size) # decay exploration", "= [(y,x) for x,y in action_list.items()] sorted_list.sort(reverse=True) take_action = sorted_list[0][1]", "= gamma * val + rewards.get(t,0) values[t] = val episode_beginning", "in action_list: action_list[act] = action_list[act] / freq_list[act] sorted_list = [(y,x)", "as np import gym from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot", "occured and take action with highest value for act in", "= env.action_space # hyperparameters: epsilon = 1.0 exploration_decay = 0.98", "def make_move(observation, reward, done): global states, actions, values, rewards, num_iter,", "has been run) # testing phase: exploitation # Uses k=500", "= action_list.get(a, 0) + v action_list[a] = vnew freq_list[a] =", "and np.random.rand() > epsilon and values: # if amount of", "and values is populated (atleast one episode has been run)", "NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices = nbrs.kneighbors(observation) # find the best action", "number of nearest neighbors minimum_num_iters = 500 # number of", "= gym.make(args.environment).env action_space = env.action_space # hyperparameters: epsilon = 1.0", "default=200) parser.add_argument('environment') args = parser.parse_args() env = gym.make(args.environment).env action_space =", "nearest neighbors minimum_num_iters = 500 # number of iterations used", "env.step(action) sum_reward += reward if done: break episode_reward = episode_reward", "in reversed(range(episode_beginning, num_iter)): val = gamma * val + rewards.get(t,0)", "action with highest value for act in action_list: action_list[act] =", "# find the best action action_list = {} freq_list =", "values: # if amount of data is sufficient and values", "action taken and reward obtained if num_iter < max_state_size: states[num_iter]", "else: # training phase: exploration randomly picks an action take_action", "warnings.warn = warn reward = 0 episode_reward = 0 done", "below 0 epsilon = max(epsilon, 0) return take_action # Ignore", "values = {} # episode-related variables episode_beginning = 0 def", "reward # and the reward we obtained last time step", "num_iter = 0 max_iters = 0 gamma = 0.95 max_state_size", "+ sum_reward * 0.05 print('Reward for episode '+ str(i)+' :", "gamma * val + rewards.get(t,0) values[t] = val episode_beginning =", "= min(max(max_iters, num_iter), max_state_size) # decay exploration probability 
epsilon *=", "= env.step(action) sum_reward += reward if done: break episode_reward =", "been run) # testing phase: exploitation # Uses k=500 nearest", "taken and reward obtained if num_iter < max_state_size: states[num_iter] =", "val = gamma * val + rewards.get(t,0) values[t] = val", "action take_action = action_space.sample() # populate the state present, action", "iterations used for training num_iter = 0 max_iters = 0", "picks an action take_action = action_space.sample() # populate the state", "warn reward = 0 episode_reward = 0 done = False", "# env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2) plt.xlabel(\"Episodes\") plt.ylabel(\"Cumulative Reward\") plt.title(\"Performance\") plt.show()", "freq_list[act] sorted_list = [(y,x) for x,y in action_list.items()] sorted_list.sort(reverse=True) take_action", "parser.parse_args() env = gym.make(args.environment).env action_space = env.action_space # hyperparameters: epsilon", "= 0.98 k = 500 # number of nearest neighbors", "import NearestNeighbors import matplotlib.pyplot as plt import argparse parser =", "500 # number of iterations used for training num_iter =", "num_iter += 1 if done: # end of episode: calculate", "= warn reward = 0 episode_reward = 0 done =", "env.render() action = make_move(observation, reward, done) observation, reward, done, _", "values[t] = val episode_beginning = num_iter max_iters = min(max(max_iters, num_iter),", "sklearn warnings def warn(*args, **kwargs): pass import warnings warnings.warn =", "done): global states, actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon", "time step values[num_iter-1] = 0 num_iter += 1 if done:", "by number of times action occured and take action with", "neighbors to pick the action which has the highest reward", "if states is None: # first state observed states =", "args = parser.parse_args() env = gym.make(args.environment).env action_space = env.action_space #", "type=int, default=500) parser.add_argument('--max_timesteps', nargs='?', type=int, default=200) parser.add_argument('environment') args = parser.parse_args()", "if done: # end of episode: calculate the value function", "= 0.95 max_state_size = 15000 # because we don't know", "Ignore sklearn warnings def warn(*args, **kwargs): pass import warnings warnings.warn", "reward, done) observation, reward, done, _ = env.step(action) sum_reward +=", "epsilon = max(epsilon, 0) return take_action # Ignore sklearn warnings", "done = False cumulative_reward_list = [] for i in range(args.episodes):", "parser.add_argument('environment') args = parser.parse_args() env = gym.make(args.environment).env action_space = env.action_space", "in indices[0]: v = values[i] a = actions[i] vnew =", "freq_list = {} for i in indices[0]: v = values[i]", "0 episode_reward = 0 done = False cumulative_reward_list = []", "= vnew freq_list[a] = freq_list.get(a, 0) + 1 # normalize", "nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices = nbrs.kneighbors(observation) # find the", "neighbors minimum_num_iters = 500 # number of iterations used for", "None: # first state observed states = np.zeros((max_state_size, observation.size)) if", "= 0 for t in reversed(range(episode_beginning, num_iter)): val = gamma", "reward, done, _ = env.step(action) sum_reward += reward if done:", "min(max(max_iters, num_iter), max_state_size) # decay exploration probability epsilon *= exploration_decay", "if num_iter < max_state_size: states[num_iter] = observation # save the", "number of 
iterations used for training num_iter = 0 max_iters", "reward obtained if num_iter < max_state_size: states[num_iter] = observation #", "sum_reward += reward if done: break episode_reward = episode_reward *", "= np.zeros((max_state_size, observation.size)) if num_iter > minimum_num_iters and np.random.rand() >", "i in range(args.episodes): observation = env.reset() sum_reward = 0 for", "KNN') parser.add_argument('--episodes', nargs='?', type=int, default=500) parser.add_argument('--max_timesteps', nargs='?', type=int, default=200) parser.add_argument('environment')", "15000 # because we don't know the state space size", "<filename>kbrl.py import numpy as np import gym from sklearn.neighbors import", "exploration probability epsilon *= exploration_decay # do not decay below", "freq_list.get(a, 0) + 1 # normalize by number of times", "global states, actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon if", "decay below 0 epsilon = max(epsilon, 0) return take_action #", "(atleast one episode has been run) # testing phase: exploitation", "max_iters = min(max(max_iters, num_iter), max_state_size) # decay exploration probability epsilon", "0 num_iter += 1 if done: # end of episode:", "in range(args.max_timesteps): env.render() action = make_move(observation, reward, done) observation, reward,", "# decay exploration probability epsilon *= exploration_decay # do not", "hyperparameters: epsilon = 1.0 exploration_decay = 0.98 k = 500", "sum_reward = 0 for j in range(args.max_timesteps): env.render() action =", "= 0 for j in range(args.max_timesteps): env.render() action = make_move(observation,", "{} freq_list = {} for i in indices[0]: v =", "amount of data is sufficient and values is populated (atleast", "has the highest reward nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices =", "'+ str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2)", "> epsilon and values: # if amount of data is", "the action which has the highest reward nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters])", "episode_reward = 0 done = False cumulative_reward_list = [] for", "and the action we took rewards[num_iter-1] = reward # and", "best action action_list = {} freq_list = {} for i", "= {} values = {} # episode-related variables episode_beginning =", "= val episode_beginning = num_iter max_iters = min(max(max_iters, num_iter), max_state_size)", "0 gamma = 0.95 max_state_size = 15000 # because we", "exploration randomly picks an action take_action = action_space.sample() # populate", "we took rewards[num_iter-1] = reward # and the reward we", "= 0 num_iter += 1 if done: # end of", "this episode val = 0 for t in reversed(range(episode_beginning, num_iter)):", "action we took rewards[num_iter-1] = reward # and the reward", "matplotlib.pyplot as plt import argparse parser = argparse.ArgumentParser(description='KBRL with KNN')", "minimum_num_iters and np.random.rand() > epsilon and values: # if amount", "num_iter), max_state_size) # decay exploration probability epsilon *= exploration_decay #", "gym from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt import", "end of episode: calculate the value function for this episode", "nargs='?', type=int, default=500) parser.add_argument('--max_timesteps', nargs='?', type=int, default=200) parser.add_argument('environment') args =", "training phase: exploration randomly 
picks an action take_action = action_space.sample()", "np.random.rand() > epsilon and values: # if amount of data", "max_state_size = 15000 # because we don't know the state", "episode '+ str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500), cumulative_reward_list,", "obtained if num_iter < max_state_size: states[num_iter] = observation # save", "> minimum_num_iters and np.random.rand() > epsilon and values: # if", "env.reset() sum_reward = 0 for j in range(args.max_timesteps): env.render() action", "don't know the state space size in continuous environments #", "= 0 episode_reward = 0 done = False cumulative_reward_list =", "# save the state actions[num_iter] = take_action # and the", "vnew = action_list.get(a, 0) + v action_list[a] = vnew freq_list[a]", "return take_action # Ignore sklearn warnings def warn(*args, **kwargs): pass", "rewards.get(t,0) values[t] = val episode_beginning = num_iter max_iters = min(max(max_iters,", "max_state_size) # decay exploration probability epsilon *= exploration_decay # do", "is sufficient and values is populated (atleast one episode has", "+ v action_list[a] = vnew freq_list[a] = freq_list.get(a, 0) +", "0) return take_action # Ignore sklearn warnings def warn(*args, **kwargs):", "variables episode_beginning = 0 def make_move(observation, reward, done): global states,", "action_space = env.action_space # hyperparameters: epsilon = 1.0 exploration_decay =", "action_list.get(a, 0) + v action_list[a] = vnew freq_list[a] = freq_list.get(a,", "* val + rewards.get(t,0) values[t] = val episode_beginning = num_iter", "/ freq_list[act] sorted_list = [(y,x) for x,y in action_list.items()] sorted_list.sort(reverse=True)", "= env.reset() sum_reward = 0 for j in range(args.max_timesteps): env.render()", "make_move(observation, reward, done): global states, actions, values, rewards, num_iter, episode_beginning,", "populate the state present, action taken and reward obtained if", "0 for j in range(args.max_timesteps): env.render() action = make_move(observation, reward,", "argparse parser = argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes', nargs='?', type=int, default=500)", "data is sufficient and values is populated (atleast one episode", "0.95 max_state_size = 15000 # because we don't know the", "# and the action we took rewards[num_iter-1] = reward #", "used for training num_iter = 0 max_iters = 0 gamma", "take_action # and the action we took rewards[num_iter-1] = reward", "state actions[num_iter] = take_action # and the action we took", "= 15000 # because we don't know the state space", "[] for i in range(args.episodes): observation = env.reset() sum_reward =", "because we don't know the state space size in continuous", "not decay below 0 epsilon = max(epsilon, 0) return take_action", "environments # learning-related variables states = None actions = {}", "Uses k=500 nearest neighbors to pick the action which has", "for act in action_list: action_list[act] = action_list[act] / freq_list[act] sorted_list", "one episode has been run) # testing phase: exploitation #", "= values[i] a = actions[i] vnew = action_list.get(a, 0) +", "pass import warnings warnings.warn = warn reward = 0 episode_reward", "the state actions[num_iter] = take_action # and the action we", "< max_state_size: states[num_iter] = observation # save the state actions[num_iter]", "and take action with highest value for act in action_list:", "reward nbrs = 
NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices = nbrs.kneighbors(observation) # find", "= None actions = {} rewards = {} values =", "= 500 # number of iterations used for training num_iter", "continuous environments # learning-related variables states = None actions =", "action occured and take action with highest value for act", "= 0 done = False cumulative_reward_list = [] for i", "and values: # if amount of data is sufficient and", "val episode_beginning = num_iter max_iters = min(max(max_iters, num_iter), max_state_size) #", "states is None: # first state observed states = np.zeros((max_state_size,", "# and the reward we obtained last time step values[num_iter-1]", "states = None actions = {} rewards = {} values", "from sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt import argparse", "nargs='?', type=int, default=200) parser.add_argument('environment') args = parser.parse_args() env = gym.make(args.environment).env", "= 1.0 exploration_decay = 0.98 k = 500 # number", "sum_reward * 0.05 print('Reward for episode '+ str(i)+' : '+str(episode_reward))", "env = gym.make(args.environment).env action_space = env.action_space # hyperparameters: epsilon =", "for j in range(args.max_timesteps): env.render() action = make_move(observation, reward, done)", "_ = env.step(action) sum_reward += reward if done: break episode_reward", "action_list.items()] sorted_list.sort(reverse=True) take_action = sorted_list[0][1] else: # training phase: exploration", "highest reward nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices = nbrs.kneighbors(observation) #", "= {} rewards = {} values = {} # episode-related", "state present, action taken and reward obtained if num_iter <", "know the state space size in continuous environments # learning-related", "*= exploration_decay # do not decay below 0 epsilon =", "max_iters = 0 gamma = 0.95 max_state_size = 15000 #", "np.zeros((max_state_size, observation.size)) if num_iter > minimum_num_iters and np.random.rand() > epsilon", "= {} freq_list = {} for i in indices[0]: v", "exploration_decay # do not decay below 0 epsilon = max(epsilon,", "* 0.95 + sum_reward * 0.05 print('Reward for episode '+", "# learning-related variables states = None actions = {} rewards", "# Uses k=500 nearest neighbors to pick the action which", "episode val = 0 for t in reversed(range(episode_beginning, num_iter)): val", "action which has the highest reward nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances,", "= 0 max_iters = 0 gamma = 0.95 max_state_size =", "break episode_reward = episode_reward * 0.95 + sum_reward * 0.05", "rewards[num_iter-1] = reward # and the reward we obtained last", "function for this episode val = 0 for t in", "0 def make_move(observation, reward, done): global states, actions, values, rewards,", "space size in continuous environments # learning-related variables states =", "of data is sufficient and values is populated (atleast one", "= freq_list.get(a, 0) + 1 # normalize by number of", "epsilon if states is None: # first state observed states", "cumulative_reward_list = [] for i in range(args.episodes): observation = env.reset()", "# training phase: exploration randomly picks an action take_action =", "we don't know the state space size in continuous environments", "reversed(range(episode_beginning, num_iter)): val = gamma * val + rewards.get(t,0) values[t]", "k=500 nearest neighbors to pick the action which has 
the", "0) + v action_list[a] = vnew freq_list[a] = freq_list.get(a, 0)", "an action take_action = action_space.sample() # populate the state present,", "'+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2) plt.xlabel(\"Episodes\") plt.ylabel(\"Cumulative Reward\")", "i in indices[0]: v = values[i] a = actions[i] vnew", "range(args.max_timesteps): env.render() action = make_move(observation, reward, done) observation, reward, done,", "type=int, default=200) parser.add_argument('environment') args = parser.parse_args() env = gym.make(args.environment).env action_space", "reward if done: break episode_reward = episode_reward * 0.95 +", "take_action # Ignore sklearn warnings def warn(*args, **kwargs): pass import", "cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2) plt.xlabel(\"Episodes\") plt.ylabel(\"Cumulative Reward\") plt.title(\"Performance\")", "the state space size in continuous environments # learning-related variables", "= nbrs.kneighbors(observation) # find the best action action_list = {}", "0) + 1 # normalize by number of times action", "episode_reward * 0.95 + sum_reward * 0.05 print('Reward for episode", ": '+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2) plt.xlabel(\"Episodes\") plt.ylabel(\"Cumulative", "of episode: calculate the value function for this episode val", "the state present, action taken and reward obtained if num_iter", "# episode-related variables episode_beginning = 0 def make_move(observation, reward, done):", "for episode '+ str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500),", "testing phase: exploitation # Uses k=500 nearest neighbors to pick", "randomly picks an action take_action = action_space.sample() # populate the", "in range(args.episodes): observation = env.reset() sum_reward = 0 for j", "episode: calculate the value function for this episode val =", "we obtained last time step values[num_iter-1] = 0 num_iter +=", "find the best action action_list = {} freq_list = {}", "val = 0 for t in reversed(range(episode_beginning, num_iter)): val =", "done: break episode_reward = episode_reward * 0.95 + sum_reward *", "num_iter < max_state_size: states[num_iter] = observation # save the state", "as plt import argparse parser = argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes',", "vnew freq_list[a] = freq_list.get(a, 0) + 1 # normalize by", "= observation # save the state actions[num_iter] = take_action #", "pick the action which has the highest reward nbrs =", "observation, reward, done, _ = env.step(action) sum_reward += reward if", "do not decay below 0 epsilon = max(epsilon, 0) return", "observed states = np.zeros((max_state_size, observation.size)) if num_iter > minimum_num_iters and", "parser.add_argument('--episodes', nargs='?', type=int, default=500) parser.add_argument('--max_timesteps', nargs='?', type=int, default=200) parser.add_argument('environment') args", "act in action_list: action_list[act] = action_list[act] / freq_list[act] sorted_list =", "* 0.05 print('Reward for episode '+ str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward)", "0 max_iters = 0 gamma = 0.95 max_state_size = 15000", "training num_iter = 0 max_iters = 0 gamma = 0.95", "of times action occured and 
take action with highest value", "states, actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon if states", "action_space.sample() # populate the state present, action taken and reward", "nearest neighbors to pick the action which has the highest", "= make_move(observation, reward, done) observation, reward, done, _ = env.step(action)", "values[num_iter-1] = 0 num_iter += 1 if done: # end", "None actions = {} rewards = {} values = {}", "if amount of data is sufficient and values is populated", "for training num_iter = 0 max_iters = 0 gamma =", "make_move(observation, reward, done) observation, reward, done, _ = env.step(action) sum_reward", "calculate the value function for this episode val = 0", "= {} # episode-related variables episode_beginning = 0 def make_move(observation,", "0 for t in reversed(range(episode_beginning, num_iter)): val = gamma *", "= actions[i] vnew = action_list.get(a, 0) + v action_list[a] =", "0 epsilon = max(epsilon, 0) return take_action # Ignore sklearn", "num_iter)): val = gamma * val + rewards.get(t,0) values[t] =", "phase: exploitation # Uses k=500 nearest neighbors to pick the", "import matplotlib.pyplot as plt import argparse parser = argparse.ArgumentParser(description='KBRL with", "= NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices = nbrs.kneighbors(observation) # find the best", "save the state actions[num_iter] = take_action # and the action", "states = np.zeros((max_state_size, observation.size)) if num_iter > minimum_num_iters and np.random.rand()", "{} for i in indices[0]: v = values[i] a =", "action_list = {} freq_list = {} for i in indices[0]:", "action_list[a] = vnew freq_list[a] = freq_list.get(a, 0) + 1 #", "present, action taken and reward obtained if num_iter < max_state_size:", "argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes', nargs='?', type=int, default=500) parser.add_argument('--max_timesteps', nargs='?', type=int,", "episode has been run) # testing phase: exploitation # Uses", "default=500) parser.add_argument('--max_timesteps', nargs='?', type=int, default=200) parser.add_argument('environment') args = parser.parse_args() env", "k = 500 # number of nearest neighbors minimum_num_iters =", "actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon if states is", "with highest value for act in action_list: action_list[act] = action_list[act]", "states[num_iter] = observation # save the state actions[num_iter] = take_action", "# do not decay below 0 epsilon = max(epsilon, 0)", "sorted_list = [(y,x) for x,y in action_list.items()] sorted_list.sort(reverse=True) take_action =", "= argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes', nargs='?', type=int, default=500) parser.add_argument('--max_timesteps', nargs='?',", "range(args.episodes): observation = env.reset() sum_reward = 0 for j in", "reward we obtained last time step values[num_iter-1] = 0 num_iter", "max(epsilon, 0) return take_action # Ignore sklearn warnings def warn(*args,", "for t in reversed(range(episode_beginning, num_iter)): val = gamma * val", "the reward we obtained last time step values[num_iter-1] = 0", "for x,y in action_list.items()] sorted_list.sort(reverse=True) take_action = sorted_list[0][1] else: #", "str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render() plt.plot(range(0,500), cumulative_reward_list, linewidth=2) plt.xlabel(\"Episodes\")", "and the reward we obtained last time 
step values[num_iter-1] =", "sufficient and values is populated (atleast one episode has been", "in action_list.items()] sorted_list.sort(reverse=True) take_action = sorted_list[0][1] else: # training phase:", "sklearn.neighbors import NearestNeighbors import matplotlib.pyplot as plt import argparse parser", "0.05 print('Reward for episode '+ str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward) #", "reward, done): global states, actions, values, rewards, num_iter, episode_beginning, max_iters,", "episode_beginning = 0 def make_move(observation, reward, done): global states, actions,", "= {} for i in indices[0]: v = values[i] a", "the action we took rewards[num_iter-1] = reward # and the", "# normalize by number of times action occured and take", "# Ignore sklearn warnings def warn(*args, **kwargs): pass import warnings", "rewards, num_iter, episode_beginning, max_iters, epsilon if states is None: #", "take_action = sorted_list[0][1] else: # training phase: exploration randomly picks", "observation = env.reset() sum_reward = 0 for j in range(args.max_timesteps):", "print('Reward for episode '+ str(i)+' : '+str(episode_reward)) cumulative_reward_list.append(episode_reward) # env.render()", "the best action action_list = {} freq_list = {} for", "plt import argparse parser = argparse.ArgumentParser(description='KBRL with KNN') parser.add_argument('--episodes', nargs='?',", "done, _ = env.step(action) sum_reward += reward if done: break", "+= 1 if done: # end of episode: calculate the", "0 done = False cumulative_reward_list = [] for i in", "max_iters, epsilon if states is None: # first state observed", "probability epsilon *= exploration_decay # do not decay below 0", "+= reward if done: break episode_reward = episode_reward * 0.95", "state observed states = np.zeros((max_state_size, observation.size)) if num_iter > minimum_num_iters", "= sorted_list[0][1] else: # training phase: exploration randomly picks an", "exploitation # Uses k=500 nearest neighbors to pick the action", "state space size in continuous environments # learning-related variables states", "the highest reward nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters]) distances, indices = nbrs.kneighbors(observation)", "parser.add_argument('--max_timesteps', nargs='?', type=int, default=200) parser.add_argument('environment') args = parser.parse_args() env =", "def warn(*args, **kwargs): pass import warnings warnings.warn = warn reward", "first state observed states = np.zeros((max_state_size, observation.size)) if num_iter >", "number of times action occured and take action with highest", "max_state_size: states[num_iter] = observation # save the state actions[num_iter] =", "warnings warnings.warn = warn reward = 0 episode_reward = 0", "= False cumulative_reward_list = [] for i in range(args.episodes): observation", "action = make_move(observation, reward, done) observation, reward, done, _ =", "take action with highest value for act in action_list: action_list[act]", "for this episode val = 0 for t in reversed(range(episode_beginning,", "# end of episode: calculate the value function for this", "epsilon and values: # if amount of data is sufficient", "# because we don't know the state space size in", "action_list[act] / freq_list[act] sorted_list = [(y,x) for x,y in action_list.items()]", "action_list[act] = action_list[act] / freq_list[act] sorted_list = [(y,x) for x,y", "{} # episode-related variables episode_beginning = 0 def make_move(observation, reward,", "in continuous 
environments # learning-related variables states = None actions", "highest value for act in action_list: action_list[act] = action_list[act] /", "= action_list[act] / freq_list[act] sorted_list = [(y,x) for x,y in", "episode_reward = episode_reward * 0.95 + sum_reward * 0.05 print('Reward", "= reward # and the reward we obtained last time", "sorted_list.sort(reverse=True) take_action = sorted_list[0][1] else: # training phase: exploration randomly", "1 # normalize by number of times action occured and", "gym.make(args.environment).env action_space = env.action_space # hyperparameters: epsilon = 1.0 exploration_decay" ]
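The fragments in the list above all come from a small KBRL-style agent: it explores with random actions, stores every observed state, action, and discounted return, and once enough data exists it picks actions by querying scikit-learn's NearestNeighbors over the saved states and averaging the returns seen for each action among the neighbours. As a hedged illustration only (the helper name knn_action, the default k, and the synthetic data below are mine, not taken from the fragments), a minimal runnable sketch of that k-NN action-selection step:

# Hypothetical condensed sketch of the k-NN action selection the fragments describe.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def knn_action(observation, states, actions, values, k=500):
    # Pick the action with the highest average return among the k nearest stored states.
    k = min(k, len(states))
    nbrs = NearestNeighbors(n_neighbors=k).fit(states)
    _, indices = nbrs.kneighbors(observation.reshape(1, -1))
    action_sum, action_freq = {}, {}
    for i in indices[0]:
        a = actions[i]
        action_sum[a] = action_sum.get(a, 0.0) + values[i]   # sum of returns per action
        action_freq[a] = action_freq.get(a, 0) + 1            # how often the action occurred
    # normalise by occurrence count, then take the best-scoring action
    return max(action_sum, key=lambda a: action_sum[a] / action_freq[a])

# Tiny synthetic usage example (random data, only to show the call shape).
rng = np.random.default_rng(0)
states = rng.normal(size=(100, 4))       # stored observations
actions = rng.integers(0, 2, size=100)   # action taken in each stored state
values = rng.normal(size=100)            # discounted return credited to each state
print(knn_action(rng.normal(size=4), states, actions, values, k=10))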
[ "es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR STATISTIC self.clients_index_name = \"clients\"", "Elasticsearch from elasticsearch import helpers import time, json, datetime, os", "{ \"transactions-\" + date: { \"properties\": { \"@dtime\": { \"type\":", "exception on create Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR", "self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "}, \"bheight\": { \"type\": \"long\" } } } } }", "except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on send Block:\", es1)", "as es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name,", "return 0 except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on search", "}, \"balance\": { \"type\": \"float\" } } } } }", "}, \"user\": { \"type\": \"text\", \"fielddata\": True }, \"balance\": {", "es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)", "if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException as es1:", "BLOCKS INDEX self.blocks_index_name = \"blocks-\" + date self.block_mapping = {", "\"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"ip\": { \"type\":", "{ \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"transactions-\" +", "{\"@dtime\": int(time.time()), \"user\": user, \"balance\": balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas)", "}, \"public_key\": { \"type\": \"text\", \"fielddata\": True }, \"client_type\": {", "as es1: print(\"Elastic exception on save bulk Transactions:\", es1) def", "= self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\" + self.lastDate, body=query) if not", "= balance.keys() jsonMas = [] print(\"USER LEN:\", len(users)) for user", "class elalog: def __init__(self, date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>'", "on create Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def elasticClients(self, jsons:list):", "\"type\": \"text\" }, \"signatures\": { \"type\": \"text\" }, \"tcount\": {", "BALANCE HISTORY self.balance_index_name = \"balance\" self.balance_mapping = { \"settings\": {", "} if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException as", "exception on save bulk Transactions:\", es1) def elasticBalanceHistory(self, balance:dict): users", "':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "\"date\", \"format\": \"epoch_second\" }, \"hash\": { \"type\": \"text\" }, \"signatures\":", "{ \"type\": \"date\", \"format\": \"epoch_second\" }, \"sender\": { \"type\": \"text\",", "+ 
date self.block_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\":", "} }, \"public_key\": { \"type\": \"text\", \"fielddata\": True }, \"client_type\":", "\"text\" }, \"location\": { \"type\": \"geo_point\" }, \"region_name\": { \"type\":", "self.balance_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 },", "date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or", "\"date\", \"format\": \"epoch_second\" }, \"user\": { \"type\": \"text\", \"fielddata\": True", "\"balance\": { \"type\": \"float\" } } } } } if", "\"fielddata\": True }, \"bheight\": { \"type\": \"long\" } } }", "\"type\": \"text\" }, \"block\": { \"type\": \"long\" } } }", "= os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate = date self.es = Elasticsearch([{'host':", "\"receiver\": { \"type\": \"text\", \"fielddata\": True }, \"token_count\": { \"type\":", "save balance:\", es1) def getLastEBlock(self): query = {\"aggs\" : {", "create Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def elasticClients(self, jsons:list): try:", "import Elasticsearch from elasticsearch import helpers import time, json, datetime,", "\"type\": \"text\", \"fielddata\": True }, \"receiver\": { \"type\": \"text\", \"fielddata\":", "{ \"type\": \"text\" }, \"continent_name\": { \"type\": \"text\" }, \"country_iso_code\":", "else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def elasticClients(self, jsons:list): try: helpers.bulk(self.es, jsons) except", "{ \"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" },", "\"long\" }, \"validator\": { \"type\": \"text\", \"fielddata\": True }, \"bheight\":", "for user in users: eljson = {\"_index\": \"balance\", \"_type\": \"balance\",", "print(\"Elastic exception on save balance:\", es1) def getLastEBlock(self): query =", "{ \"type\": \"date\", \"format\": \"epoch_second\" }, \"user\": { \"type\": \"text\",", "== None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0 except elasticsearch.ElasticsearchException as", "es_host, 'port': es_port}]) # BLOCKS INDEX self.blocks_index_name = \"blocks-\" +", "\"type\": \"text\" }, \"country_iso_code\": { \"type\": \"text\" }, \"location\": {", "{ \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"blocks-\" +", "5, \"number_of_replicas\": 0 }, \"mappings\": { \"clients\": { \"properties\": {", "\"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"sender\": { \"type\":", "}, \"signatures\": { \"type\": \"text\" }, \"tcount\": { \"type\": \"long\"", "\"city_name\": { \"type\": \"text\" }, \"continent_name\": { \"type\": \"text\" },", "} } } if self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except", "Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS INDEX self.transactions_index_name =", "elasticClients(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "on send Block:\", es1) def elasticTransaction(self, jsons:list): try: helpers.bulk(self.es, jsons)", "\"number_of_replicas\": 0 }, \"mappings\": { \"blocks-\" + date: { \"properties\":", "self.lastDate, doc_type=\"blocks-\" + 
self.lastDate, body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None:", "\"_type\": \"balance\", \"_id\": user, \"_source\": {\"@dtime\": int(time.time()), \"user\": user, \"balance\":", "create Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR STATISTIC self.clients_index_name", "{ \"blocks-\" + date: { \"properties\": { \"@dtime\": { \"type\":", "print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) #", "elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save bulk Transactions:\", es1)", "# VALIDATOR STATISTIC self.clients_index_name = \"clients\" self.clients_mapping = { \"settings\":", "5, \"number_of_replicas\": 0 }, \"mappings\": { \"balance\": { \"properties\": {", "\"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"balance\": { \"properties\":", "elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int): index = 'blocks-'", "es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS INDEX self.transactions_index_name = \"transactions-\"", "bulk Transactions:\", es1) def elasticBalanceHistory(self, balance:dict): users = balance.keys() jsonMas", "} } } } if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)", "\"fielddata\": True } } } } } if self.es.indices.exists(self.clients_index_name): try:", "self.clients_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 },", "self.es = Elasticsearch([{'host': es_host, 'port': es_port}]) # BLOCKS INDEX self.blocks_index_name", "\"tcount\": { \"type\": \"long\" }, \"validator\": { \"type\": \"text\", \"fielddata\":", "\"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"blocks-\" + date:", "\"fielddata\": True }, \"receiver\": { \"type\": \"text\", \"fielddata\": True },", "0 }, \"mappings\": { \"balance\": { \"properties\": { \"@dtime\": {", "import helpers import time, json, datetime, os class elalog: def", "if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0", "{ \"type\": \"long\" } } } } } if self.es.indices.exists(self.transactions_index_name):", "\"number_of_replicas\": 0 }, \"mappings\": { \"clients\": { \"properties\": { \"@dtime\":", "create Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS INDEX self.transactions_index_name", "try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR STATISTIC self.clients_index_name = \"clients\" self.clients_mapping", "}, \"validator\": { \"type\": \"text\", \"fielddata\": True }, \"bheight\": {", "\"location\": { \"type\": \"geo_point\" }, \"region_name\": { \"type\": \"text\" }", "= 'blocks-' + self.lastDate estype = 'blocks-' + self.lastDate eljson", "import elasticsearch from elasticsearch import Elasticsearch from elasticsearch import helpers", "\"type\": \"text\", \"fielddata\": True }, 
\"token_count\": { \"type\": \"float\" },", "self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "self.lastDate estype = 'blocks-' + self.lastDate eljson = json.dumps({\"@dtime\": int(timestamp),", "VALIDATOR STATISTIC self.clients_index_name = \"clients\" self.clients_mapping = { \"settings\": {", "print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def", "+ self.lastDate eljson = json.dumps({\"@dtime\": int(timestamp), \"validator\": validator, \"tcount\": tcount,", "signatures:list, hash:str, bheight:int): index = 'blocks-' + self.lastDate estype =", "user, \"balance\": balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException as", "\"type\": \"date\", \"format\": \"epoch_second\" }, \"user\": { \"type\": \"text\", \"fielddata\":", "\"user\": { \"type\": \"text\", \"fielddata\": True }, \"balance\": { \"type\":", "balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "\"epoch_second\" }, \"hash\": { \"type\": \"text\" }, \"signatures\": { \"type\":", "Block:\", es1) def elasticTransaction(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException", "}, \"sender\": { \"type\": \"text\", \"fielddata\": True }, \"receiver\": {", "elalog: def __init__(self, date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port", "print(\"Elastic exception on save Validators:\", es1) print(\"Save Validators in elastic!\")", "\"geoip\": { \"properties\": { \"city_name\": { \"type\": \"text\" }, \"continent_name\":", "= Elasticsearch([{'host': es_host, 'port': es_port}]) # BLOCKS INDEX self.blocks_index_name =", "\"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"transactions-\" + date:", "as es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name,", "self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "bheight}, separators=(',', ':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson) except elasticsearch.ElasticsearchException as", "Transactions:\", es1) def elasticBalanceHistory(self, balance:dict): users = balance.keys() jsonMas =", "} if self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException as", "not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0 except", "= \"balance\" self.balance_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\":", "\"balance\": { \"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\"", "self.balance_index_name = \"balance\" self.balance_mapping = { \"settings\": { \"number_of_shards\": 5,", "index = 'blocks-' + self.lastDate estype = 'blocks-' + self.lastDate", "send Block:\", es1) def 
elasticTransaction(self, jsons:list): try: helpers.bulk(self.es, jsons) except", "on create Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR STATISTIC", "try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "\"date\", \"format\": \"epoch_second\" }, \"ip\": { \"type\": \"ip\" }, \"geoip\":", "}, \"tcount\": { \"type\": \"long\" }, \"validator\": { \"type\": \"text\",", "\"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"hash\":", "{ \"type\": \"text\" }, \"block\": { \"type\": \"long\" } }", "\"sender\": { \"type\": \"text\", \"fielddata\": True }, \"receiver\": { \"type\":", "\"number_of_replicas\": 0 }, \"mappings\": { \"balance\": { \"properties\": { \"@dtime\":", "self.clients_index_name = \"clients\" self.clients_mapping = { \"settings\": { \"number_of_shards\": 5,", "self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR STATISTIC self.clients_index_name = \"clients\" self.clients_mapping =", "{ \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 } try: answer = self.es.search(index=\"blocks-\" +", "exception on save balance:\", es1) def getLastEBlock(self): query = {\"aggs\"", "try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "\"hash\": { \"type\": \"text\" }, \"block\": { \"type\": \"long\" }", "on create Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE HISTORY", "\"user\": user, \"balance\": balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException", "print(\"Elastic exception on save bulk Transactions:\", es1) def elasticBalanceHistory(self, balance:dict):", "print(\"USER LEN:\", len(users)) for user in users: eljson = {\"_index\":", "es1) def elasticBalanceHistory(self, balance:dict): users = balance.keys() jsonMas = []", "} if self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException as", "jsonMas) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save balance:\",", "doc_type=estype.lower(), body=eljson) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on send", "= { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\":", "\"text\" }, \"block\": { \"type\": \"long\" } } } }", "def elasticTransaction(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1:", "es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def elasticClients(self, jsons:list): try: helpers.bulk(self.es, jsons)", "{ \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"clients\": {", "exception on create Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def elasticClients(self,", "try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), 
body=eljson) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "create Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE HISTORY self.balance_index_name", "LEN:\", len(users)) for user in users: eljson = {\"_index\": \"balance\",", "jsonMas = [] print(\"USER LEN:\", len(users)) for user in users:", "def getLastEBlock(self): query = {\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0", "STATISTIC self.clients_index_name = \"clients\" self.clients_mapping = { \"settings\": { \"number_of_shards\":", "{ \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"balance\": {", "elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on send Block:\", es1) def", "}, \"block\": { \"type\": \"long\" } } } } }", "self.lastDate, body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else:", "\"type\": \"text\" }, \"tcount\": { \"type\": \"long\" }, \"validator\": {", "\"type\": \"text\", \"fielddata\": True }, \"hash\": { \"type\": \"text\" },", "{ \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"ip\": {", "self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE HISTORY self.balance_index_name = \"balance\" self.balance_mapping =", "users = balance.keys() jsonMas = [] print(\"USER LEN:\", len(users)) for", "\"epoch_second\" }, \"sender\": { \"type\": \"text\", \"fielddata\": True }, \"receiver\":", "hash, \"bheight\": bheight}, separators=(',', ':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson) except", "def elasticBalanceHistory(self, balance:dict): users = balance.keys() jsonMas = [] print(\"USER", "os class elalog: def __init__(self, date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or", "{ \"type\": \"long\" } } } } } if self.es.indices.exists(self.blocks_index_name):", "getLastEBlock(self): query = {\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 }", "\"balance\", \"_id\": user, \"_source\": {\"@dtime\": int(time.time()), \"user\": user, \"balance\": balance.get(user)}}", "self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create", ": { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 } try: answer = self.es.search(index=\"blocks-\"", "} } } } if self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)", "0 }, \"mappings\": { \"transactions-\" + date: { \"properties\": {", "es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping)", "answer = self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\" + self.lastDate, body=query) if", "} } } } if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)", "{ \"type\": \"text\", \"fielddata\": True }, \"client_type\": { \"type\": \"text\",", "= {\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 } try: answer", "= \"transactions-\" + date 
self.transactions_mapping = { \"settings\": { \"number_of_shards\":", "self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "}, \"geoip\": { \"properties\": { \"city_name\": { \"type\": \"text\" },", "save bulk Transactions:\", es1) def elasticBalanceHistory(self, balance:dict): users = balance.keys()", "body=self.block_mapping) # TRANSACTIONS INDEX self.transactions_index_name = \"transactions-\" + date self.transactions_mapping", "\"type\": \"date\", \"format\": \"epoch_second\" }, \"sender\": { \"type\": \"text\", \"fielddata\":", "}, \"receiver\": { \"type\": \"text\", \"fielddata\": True }, \"token_count\": {", "\"format\": \"epoch_second\" }, \"user\": { \"type\": \"text\", \"fielddata\": True },", "\"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"blocks-\"", "} } } } if self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping)", "balance:dict): users = balance.keys() jsonMas = [] print(\"USER LEN:\", len(users))", "\"text\", \"fielddata\": True }, \"hash\": { \"type\": \"text\" }, \"block\":", "},\"size\": 0 } try: answer = self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\"", "True }, \"hash\": { \"type\": \"text\" }, \"block\": { \"type\":", "print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) #", "elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create Indicies:\", es1) else:", "es1) print(\"Save Validators in elastic!\") def elasticBlock(self, timestamp:float, validator:str, tcount:int,", "HISTORY self.balance_index_name = \"balance\" self.balance_mapping = { \"settings\": { \"number_of_shards\":", "es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping)", "\"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"transactions-\"", "= \"clients\" self.clients_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\":", "{ \"type\": \"long\" }, \"validator\": { \"type\": \"text\", \"fielddata\": True", "+ self.lastDate estype = 'blocks-' + self.lastDate eljson = json.dumps({\"@dtime\":", "\"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"hash\": { \"type\":", "int(timestamp), \"validator\": validator, \"tcount\": tcount, \"signatures\": list(signatures), \"hash\": hash, \"bheight\":", "0 } try: answer = self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\" +", "print(\"Elastic exception on send Block:\", es1) def elasticTransaction(self, jsons:list): try:", "elasticsearch import Elasticsearch from elasticsearch import helpers import time, json,", "+ self.lastDate, doc_type=\"blocks-\" + self.lastDate, body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] ==", "exception on send Block:\", es1) def elasticTransaction(self, jsons:list): try: helpers.bulk(self.es,", "\"_id\": user, \"_source\": {\"@dtime\": int(time.time()), \"user\": user, \"balance\": balance.get(user)}} jsonMas.append(eljson)", "jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save bulk", "\"settings\": { \"number_of_shards\": 5, 
\"number_of_replicas\": 0 }, \"mappings\": { \"clients\":", "\"text\" } } }, \"public_key\": { \"type\": \"text\", \"fielddata\": True", "\"mappings\": { \"balance\": { \"properties\": { \"@dtime\": { \"type\": \"date\",", "\"clients\" self.clients_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0", "len(users)) for user in users: eljson = {\"_index\": \"balance\", \"_type\":", "query = {\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 } try:", "\"type\": \"text\", \"fielddata\": True } } } } } if", "{ \"type\": \"geo_point\" }, \"region_name\": { \"type\": \"text\" } }", "= date self.es = Elasticsearch([{'host': es_host, 'port': es_port}]) # BLOCKS", "\"date\", \"format\": \"epoch_second\" }, \"sender\": { \"type\": \"text\", \"fielddata\": True", "\"epoch_second\" }, \"user\": { \"type\": \"text\", \"fielddata\": True }, \"balance\":", "on save balance:\", es1) def getLastEBlock(self): query = {\"aggs\" :", "TRANSACTIONS INDEX self.transactions_index_name = \"transactions-\" + date self.transactions_mapping = {", "on save bulk Transactions:\", es1) def elasticBalanceHistory(self, balance:dict): users =", "date self.block_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0", "\"validator\": validator, \"tcount\": tcount, \"signatures\": list(signatures), \"hash\": hash, \"bheight\": bheight},", "es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE HISTORY self.balance_index_name = \"balance\"", "5, \"number_of_replicas\": 0 }, \"mappings\": { \"blocks-\" + date: {", "{ \"type\": \"float\" } } } } } if self.es.indices.exists(self.balance_index_name):", "except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on search last block", "\"ip\": { \"type\": \"ip\" }, \"geoip\": { \"properties\": { \"city_name\":", "Indicies:\", es1) else: self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) # VALIDATOR STATISTIC self.clients_index_name =", "0 except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on search last", "{ \"type\": \"text\" }, \"location\": { \"type\": \"geo_point\" }, \"region_name\":", "self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "\"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"user\":", "self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def elasticClients(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException", "{ \"type\": \"ip\" }, \"geoip\": { \"properties\": { \"city_name\": {", "}, \"hash\": { \"type\": \"text\" }, \"signatures\": { \"type\": \"text\"", "\"text\" }, \"continent_name\": { \"type\": \"text\" }, \"country_iso_code\": { \"type\":", "\"public_key\": { \"type\": \"text\", \"fielddata\": True }, \"client_type\": { \"type\":", "helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save", "\"long\" } } } } } if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name)", "as es1: print(\"Elastic exception on send Block:\", es1) def elasticTransaction(self,", "\"fielddata\": True }, \"hash\": { \"type\": \"text\" }, \"block\": {", "from elasticsearch import Elasticsearch from 
elasticsearch import helpers import time,", "self.transactions_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 },", "\"long\" } } } } } if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name)", "} } } if self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except", "os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate = date self.es = Elasticsearch([{'host': es_host,", "= \"blocks-\" + date self.block_mapping = { \"settings\": { \"number_of_shards\":", "json.dumps({\"@dtime\": int(timestamp), \"validator\": validator, \"tcount\": tcount, \"signatures\": list(signatures), \"hash\": hash,", "\"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"clients\": { \"properties\":", "\"fielddata\": True }, \"balance\": { \"type\": \"float\" } } }", "else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS INDEX self.transactions_index_name = \"transactions-\" +", "self.block_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 },", "as es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name,", "} } if self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException", "\"format\": \"epoch_second\" }, \"sender\": { \"type\": \"text\", \"fielddata\": True },", "estype = 'blocks-' + self.lastDate eljson = json.dumps({\"@dtime\": int(timestamp), \"validator\":", "+ self.lastDate, body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"])", "}, \"token_type\": { \"type\": \"text\", \"fielddata\": True }, \"hash\": {", "self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create", "body=self.block_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create Indicies:\",", "es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200'", "{ \"type\": \"text\", \"fielddata\": True }, \"token_count\": { \"type\": \"float\"", "\"hash\": hash, \"bheight\": bheight}, separators=(',', ':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson)", "}, \"token_count\": { \"type\": \"float\" }, \"token_type\": { \"type\": \"text\",", "\"block\": { \"type\": \"long\" } } } } } if", "es1) def elasticTransaction(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as", "self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "body=self.balance_mapping) # VALIDATOR STATISTIC self.clients_index_name = \"clients\" self.clients_mapping = {", "} } }, \"public_key\": { \"type\": \"text\", \"fielddata\": True },", "Indicies:\", es1) else: self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) def 
elasticClients(self, jsons:list): try: helpers.bulk(self.es,", "balance.keys() jsonMas = [] print(\"USER LEN:\", len(users)) for user in", "{ \"type\": \"text\" }, \"country_iso_code\": { \"type\": \"text\" }, \"location\":", "answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0 except elasticsearch.ElasticsearchException", "if self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException as es1:", "\"float\" }, \"token_type\": { \"type\": \"text\", \"fielddata\": True }, \"hash\":", "\"format\": \"epoch_second\" }, \"hash\": { \"type\": \"text\" }, \"signatures\": {", "\"bheight\": { \"type\": \"long\" } } } } } if", "\"signatures\": list(signatures), \"hash\": hash, \"bheight\": bheight}, separators=(',', ':')) try: self.es.index(index=str(index).lower(),", "elasticsearch from elasticsearch import Elasticsearch from elasticsearch import helpers import", "exception on save Validators:\", es1) print(\"Save Validators in elastic!\") def", "user, \"_source\": {\"@dtime\": int(time.time()), \"user\": user, \"balance\": balance.get(user)}} jsonMas.append(eljson) try:", "\"balance\": balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException as es1:", "\"signatures\": { \"type\": \"text\" }, \"tcount\": { \"type\": \"long\" },", "elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save balance:\", es1) def", "except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create Indicies:\", es1)", "\"type\": \"long\" }, \"validator\": { \"type\": \"text\", \"fielddata\": True },", "es_port}]) # BLOCKS INDEX self.blocks_index_name = \"blocks-\" + date self.block_mapping", "{ \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"sender\": {", "jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException as es1:", "self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "in elastic!\") def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int):", "elasticTransaction(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "es1) def getLastEBlock(self): query = {\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\":", "\"text\", \"fielddata\": True } } } } } if self.es.indices.exists(self.clients_index_name):", "validator:str, tcount:int, signatures:list, hash:str, bheight:int): index = 'blocks-' + self.lastDate", "self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create", "{ \"type\": \"text\" } } }, \"public_key\": { \"type\": \"text\",", "else: return 0 except 
elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "{ \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"user\": {", "self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS INDEX self.transactions_index_name = \"transactions-\" + date", "exception on create Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE", "\"balance\", \"_type\": \"balance\", \"_id\": user, \"_source\": {\"@dtime\": int(time.time()), \"user\": user,", "as es1: print(\"Elastic exception on search last block index:\", es1)", "{ \"type\": \"text\", \"fielddata\": True }, \"receiver\": { \"type\": \"text\",", "in users: eljson = {\"_index\": \"balance\", \"_type\": \"balance\", \"_id\": user,", "try: answer = self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\" + self.lastDate, body=query)", "elastic!\") def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int): index", "}, \"hash\": { \"type\": \"text\" }, \"block\": { \"type\": \"long\"", "\"balance\" self.balance_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0", "list(signatures), \"hash\": hash, \"bheight\": bheight}, separators=(',', ':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(),", "None: return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0 except elasticsearch.ElasticsearchException as es1:", "} } } if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except", "\"text\", \"fielddata\": True }, \"balance\": { \"type\": \"float\" } }", "'blocks-' + self.lastDate eljson = json.dumps({\"@dtime\": int(timestamp), \"validator\": validator, \"tcount\":", "\"mappings\": { \"transactions-\" + date: { \"properties\": { \"@dtime\": {", "}, \"country_iso_code\": { \"type\": \"text\" }, \"location\": { \"type\": \"geo_point\"", "body=self.clients_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create Indicies:\",", "elasticBalanceHistory(self, balance:dict): users = balance.keys() jsonMas = [] print(\"USER LEN:\",", "{ \"type\": \"text\", \"fielddata\": True }, \"bheight\": { \"type\": \"long\"", "balance:\", es1) def getLastEBlock(self): query = {\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}}", "es1: print(\"Elastic exception on send Block:\", es1) def elasticTransaction(self, jsons:list):", "on create Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS INDEX", "{ \"type\": \"text\" }, \"signatures\": { \"type\": \"text\" }, \"tcount\":", "def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int): index =", "{ \"type\": \"text\", \"fielddata\": True }, \"hash\": { \"type\": \"text\"", "{ \"clients\": { \"properties\": { \"@dtime\": { \"type\": \"date\", \"format\":", "True }, \"bheight\": { \"type\": \"long\" } } } }", "# BLOCKS INDEX self.blocks_index_name = \"blocks-\" + date self.block_mapping =", "\"type\": \"float\" } } } } } if self.es.indices.exists(self.balance_index_name): try:", "\"type\": \"date\", \"format\": \"epoch_second\" }, \"hash\": { \"type\": \"text\" },", "body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: 
return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return", "\"mappings\": { \"clients\": { \"properties\": { \"@dtime\": { \"type\": \"date\",", "jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save Validators:\",", "eljson = json.dumps({\"@dtime\": int(timestamp), \"validator\": validator, \"tcount\": tcount, \"signatures\": list(signatures),", "es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate = date self.es =", "\"text\", \"fielddata\": True }, \"client_type\": { \"type\": \"text\", \"fielddata\": True", "Validators in elastic!\") def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list, hash:str,", "}, \"continent_name\": { \"type\": \"text\" }, \"country_iso_code\": { \"type\": \"text\"", "\"type\": \"text\" }, \"location\": { \"type\": \"geo_point\" }, \"region_name\": {", "save Validators:\", es1) print(\"Save Validators in elastic!\") def elasticBlock(self, timestamp:float,", "\"number_of_replicas\": 0 }, \"mappings\": { \"transactions-\" + date: { \"properties\":", "Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE HISTORY self.balance_index_name =", "\"country_iso_code\": { \"type\": \"text\" }, \"location\": { \"type\": \"geo_point\" },", "{ \"balance\": { \"properties\": { \"@dtime\": { \"type\": \"date\", \"format\":", "+ date: { \"properties\": { \"@dtime\": { \"type\": \"date\", \"format\":", "}, \"region_name\": { \"type\": \"text\" } } }, \"public_key\": {", "elasticsearch import helpers import time, json, datetime, os class elalog:", "{ \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"hash\": {", "\"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"ip\":", "}, \"mappings\": { \"transactions-\" + date: { \"properties\": { \"@dtime\":", "}, \"mappings\": { \"blocks-\" + date: { \"properties\": { \"@dtime\":", "INDEX self.transactions_index_name = \"transactions-\" + date self.transactions_mapping = { \"settings\":", "Validators:\", es1) print(\"Save Validators in elastic!\") def elasticBlock(self, timestamp:float, validator:str,", "\"type\": \"text\", \"fielddata\": True }, \"bheight\": { \"type\": \"long\" }", "'port': es_port}]) # BLOCKS INDEX self.blocks_index_name = \"blocks-\" + date", "\"type\": \"text\" } } }, \"public_key\": { \"type\": \"text\", \"fielddata\":", "} } if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException", "doc_type=\"blocks-\" + self.lastDate, body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"] == None: return", "{ \"properties\": { \"city_name\": { \"type\": \"text\" }, \"continent_name\": {", "elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on search last block index:\",", "# TRANSACTIONS INDEX self.transactions_index_name = \"transactions-\" + date self.transactions_mapping =", "\"client_type\": { \"type\": \"text\", \"fielddata\": True } } } }", "print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) #", "{ \"city_name\": { \"type\": \"text\" }, \"continent_name\": { \"type\": \"text\"", "\"float\" } } } } } if self.es.indices.exists(self.balance_index_name): try: 
self.es.indices.delete(index=self.balance_index_name)", "\"transactions-\" + date self.transactions_mapping = { \"settings\": { \"number_of_shards\": 5,", "date self.transactions_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0", "\"text\", \"fielddata\": True }, \"token_count\": { \"type\": \"float\" }, \"token_type\":", "True }, \"client_type\": { \"type\": \"text\", \"fielddata\": True } }", "separators=(',', ':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson) except elasticsearch.ElasticsearchException as es1:", "\"text\" }, \"signatures\": { \"type\": \"text\" }, \"tcount\": { \"type\":", "self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "or '<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate = date", "\"validator\": { \"type\": \"text\", \"fielddata\": True }, \"bheight\": { \"type\":", "\"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"sender\":", "} try: answer = self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\" + self.lastDate,", "\"region_name\": { \"type\": \"text\" } } }, \"public_key\": { \"type\":", "'<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate = date self.es", "hash:str, bheight:int): index = 'blocks-' + self.lastDate estype = 'blocks-'", "from elasticsearch import helpers import time, json, datetime, os class", "tcount, \"signatures\": list(signatures), \"hash\": hash, \"bheight\": bheight}, separators=(',', ':')) try:", "def __init__(self, date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port =", "= 'blocks-' + self.lastDate eljson = json.dumps({\"@dtime\": int(timestamp), \"validator\": validator,", "} } } } } if self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name,", "eljson = {\"_index\": \"balance\", \"_type\": \"balance\", \"_id\": user, \"_source\": {\"@dtime\":", "}, \"mappings\": { \"balance\": { \"properties\": { \"@dtime\": { \"type\":", "es1: print(\"Elastic exception on save balance:\", es1) def getLastEBlock(self): query", "\"clients\": { \"properties\": { \"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\"", "body=eljson) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on send Block:\",", "= [] print(\"USER LEN:\", len(users)) for user in users: eljson", "try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "timestamp:float, validator:str, tcount:int, signatures:list, hash:str, bheight:int): index = 'blocks-' +", "\"token_type\": { \"type\": \"text\", \"fielddata\": True }, \"hash\": { \"type\":", "\"fielddata\": True }, \"token_count\": { \"type\": \"float\" }, \"token_type\": {", "es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping)", "def elasticClients(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1:", "date: { \"properties\": { \"@dtime\": 
{ \"type\": \"date\", \"format\": \"epoch_second\"", "__init__(self, date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\")", "\"type\": \"ip\" }, \"geoip\": { \"properties\": { \"city_name\": { \"type\":", "validator, \"tcount\": tcount, \"signatures\": list(signatures), \"hash\": hash, \"bheight\": bheight}, separators=(',',", "\"mappings\": { \"blocks-\" + date: { \"properties\": { \"@dtime\": {", "\"epoch_second\" }, \"ip\": { \"type\": \"ip\" }, \"geoip\": { \"properties\":", "datetime, os class elalog: def __init__(self, date): es_host = os.getenv(\"ES_PORT_9200_TCP_ADDR\")", "date self.es = Elasticsearch([{'host': es_host, 'port': es_port}]) # BLOCKS INDEX", "on save Validators:\", es1) print(\"Save Validators in elastic!\") def elasticBlock(self,", "\"text\" }, \"tcount\": { \"type\": \"long\" }, \"validator\": { \"type\":", "{\"aggs\" : { \"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 } try: answer =", "try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "\"blocks-\" + date: { \"properties\": { \"@dtime\": { \"type\": \"date\",", "True }, \"token_count\": { \"type\": \"float\" }, \"token_type\": { \"type\":", "= os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate", "{ \"type\": \"text\", \"fielddata\": True } } } } }", "} } } if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except", "else: self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) # BALANCE HISTORY self.balance_index_name = \"balance\" self.balance_mapping", "elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save Validators:\", es1) print(\"Save", "0 }, \"mappings\": { \"clients\": { \"properties\": { \"@dtime\": {", "Elasticsearch([{'host': es_host, 'port': es_port}]) # BLOCKS INDEX self.blocks_index_name = \"blocks-\"", "{ \"type\": \"text\" }, \"tcount\": { \"type\": \"long\" }, \"validator\":", "} } if self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException", "\"properties\": { \"city_name\": { \"type\": \"text\" }, \"continent_name\": { \"type\":", "if self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name) self.es.indices.create(index=self.clients_index_name, body=self.clients_mapping) except elasticsearch.ElasticsearchException as es1:", "{ \"type\": \"date\", \"format\": \"epoch_second\" }, \"hash\": { \"type\": \"text\"", "0 }, \"mappings\": { \"blocks-\" + date: { \"properties\": {", "= json.dumps({\"@dtime\": int(timestamp), \"validator\": validator, \"tcount\": tcount, \"signatures\": list(signatures), \"hash\":", "\"@dtime\": { \"type\": \"date\", \"format\": \"epoch_second\" }, \"user\": { \"type\":", "'9200' self.lastDate = date self.es = Elasticsearch([{'host': es_host, 'port': es_port}])", "body=self.balance_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create Indicies:\",", "}, \"client_type\": { \"type\": \"text\", \"fielddata\": True } } }", "except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception 
on save bulk Transactions:\",", "} if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) except elasticsearch.ElasticsearchException as", "return int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0 except elasticsearch.ElasticsearchException as es1: print(\"Elastic", "{ \"type\": \"date\", \"format\": \"epoch_second\" }, \"ip\": { \"type\": \"ip\"", "int(answer[\"aggregations\"][\"max_blnum\"][\"value\"]) else: return 0 except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "\"geo_point\" }, \"region_name\": { \"type\": \"text\" } } }, \"public_key\":", "body=self.transactions_mapping) # BALANCE HISTORY self.balance_index_name = \"balance\" self.balance_mapping = {", "try: helpers.bulk(self.es, jsonMas) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on", "\"fielddata\": True }, \"client_type\": { \"type\": \"text\", \"fielddata\": True }", "\"type\": \"long\" } } } } } if self.es.indices.exists(self.blocks_index_name): try:", "int(time.time()), \"user\": user, \"balance\": balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es, jsonMas) except", "es1: print(\"Elastic exception on save bulk Transactions:\", es1) def elasticBalanceHistory(self,", "self.lastDate = date self.es = Elasticsearch([{'host': es_host, 'port': es_port}]) #", "self.transactions_index_name = \"transactions-\" + date self.transactions_mapping = { \"settings\": {", "}, \"mappings\": { \"clients\": { \"properties\": { \"@dtime\": { \"type\":", "as es1: print(\"Elastic exception on create Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name,", "\"type\": \"float\" }, \"token_type\": { \"type\": \"text\", \"fielddata\": True },", "} } } } } if self.es.indices.exists(self.balance_index_name): try: self.es.indices.delete(index=self.balance_index_name) self.es.indices.create(index=self.balance_index_name,", "user in users: eljson = {\"_index\": \"balance\", \"_type\": \"balance\", \"_id\":", "} } } } } if self.es.indices.exists(self.blocks_index_name): try: self.es.indices.delete(index=self.blocks_index_name) self.es.indices.create(index=self.blocks_index_name,", "body=self.transactions_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create Indicies:\",", "\"max_blnum\":{\"max\":{\"field\":\"bheight\"}} },\"size\": 0 } try: answer = self.es.search(index=\"blocks-\" + self.lastDate,", "{ \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": {", "self.es.indices.create(index=self.balance_index_name, body=self.balance_mapping) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on create", "# BALANCE HISTORY self.balance_index_name = \"balance\" self.balance_mapping = { \"settings\":", "\"type\": \"geo_point\" }, \"region_name\": { \"type\": \"text\" } } },", "time, json, datetime, os class elalog: def __init__(self, date): es_host", "\"transactions-\" + date: { \"properties\": { \"@dtime\": { \"type\": \"date\",", "True }, \"receiver\": { \"type\": \"text\", \"fielddata\": True }, \"token_count\":", "'blocks-' + self.lastDate estype = 'blocks-' + self.lastDate eljson =", "5, \"number_of_replicas\": 0 }, \"mappings\": { \"transactions-\" + date: {", "except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save Validators:\", es1)", "True } } } } } if 
self.es.indices.exists(self.clients_index_name): try: self.es.indices.delete(index=self.clients_index_name)", "\"blocks-\" + date self.block_mapping = { \"settings\": { \"number_of_shards\": 5,", "} } if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name, body=self.transactions_mapping) except elasticsearch.ElasticsearchException", "\"type\": \"text\" }, \"continent_name\": { \"type\": \"text\" }, \"country_iso_code\": {", "os.getenv(\"ES_PORT_9200_TCP_ADDR\") or '<%ELASTICIP%>' es_port = os.getenv(\"ES_PORT_9200_TCP_PORT\") or '9200' self.lastDate =", "bheight:int): index = 'blocks-' + self.lastDate estype = 'blocks-' +", "import time, json, datetime, os class elalog: def __init__(self, date):", "\"text\", \"fielddata\": True }, \"receiver\": { \"type\": \"text\", \"fielddata\": True", "\"text\", \"fielddata\": True }, \"bheight\": { \"type\": \"long\" } }", "[] print(\"USER LEN:\", len(users)) for user in users: eljson =", "\"format\": \"epoch_second\" }, \"ip\": { \"type\": \"ip\" }, \"geoip\": {", "}, \"location\": { \"type\": \"geo_point\" }, \"region_name\": { \"type\": \"text\"", "True }, \"balance\": { \"type\": \"float\" } } } }", "\"hash\": { \"type\": \"text\" }, \"signatures\": { \"type\": \"text\" },", "\"type\": \"text\", \"fielddata\": True }, \"balance\": { \"type\": \"float\" }", "\"type\": \"text\", \"fielddata\": True }, \"client_type\": { \"type\": \"text\", \"fielddata\":", "\"tcount\": tcount, \"signatures\": list(signatures), \"hash\": hash, \"bheight\": bheight}, separators=(',', ':'))", "body=self.clients_mapping) def elasticClients(self, jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as", "users: eljson = {\"_index\": \"balance\", \"_type\": \"balance\", \"_id\": user, \"_source\":", "\"ip\" }, \"geoip\": { \"properties\": { \"city_name\": { \"type\": \"text\"", "{ \"type\": \"text\", \"fielddata\": True }, \"balance\": { \"type\": \"float\"", "tcount:int, signatures:list, hash:str, bheight:int): index = 'blocks-' + self.lastDate estype", "\"token_count\": { \"type\": \"float\" }, \"token_type\": { \"type\": \"text\", \"fielddata\":", "{\"_index\": \"balance\", \"_type\": \"balance\", \"_id\": user, \"_source\": {\"@dtime\": int(time.time()), \"user\":", "\"text\" }, \"country_iso_code\": { \"type\": \"text\" }, \"location\": { \"type\":", "= {\"_index\": \"balance\", \"_type\": \"balance\", \"_id\": user, \"_source\": {\"@dtime\": int(time.time()),", "as es1: print(\"Elastic exception on save Validators:\", es1) print(\"Save Validators", "print(\"Save Validators in elastic!\") def elasticBlock(self, timestamp:float, validator:str, tcount:int, signatures:list,", "self.lastDate eljson = json.dumps({\"@dtime\": int(timestamp), \"validator\": validator, \"tcount\": tcount, \"signatures\":", "es1: print(\"Elastic exception on save Validators:\", es1) print(\"Save Validators in", "{ \"type\": \"float\" }, \"token_type\": { \"type\": \"text\", \"fielddata\": True", "exception on create Indicies:\", es1) else: self.es.indices.create(index=self.blocks_index_name, body=self.block_mapping) # TRANSACTIONS", "json, datetime, os class elalog: def __init__(self, date): es_host =", "\"type\": \"date\", \"format\": \"epoch_second\" }, \"ip\": { \"type\": \"ip\" },", "self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson) except elasticsearch.ElasticsearchException as es1: print(\"Elastic 
exception on", "}, \"ip\": { \"type\": \"ip\" }, \"geoip\": { \"properties\": {", "\"_source\": {\"@dtime\": int(time.time()), \"user\": user, \"balance\": balance.get(user)}} jsonMas.append(eljson) try: helpers.bulk(self.es,", "\"bheight\": bheight}, separators=(',', ':')) try: self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson) except elasticsearch.ElasticsearchException", "+ date self.transactions_mapping = { \"settings\": { \"number_of_shards\": 5, \"number_of_replicas\":", "INDEX self.blocks_index_name = \"blocks-\" + date self.block_mapping = { \"settings\":", "} } } } } if self.es.indices.exists(self.transactions_index_name): try: self.es.indices.delete(index=self.transactions_index_name) self.es.indices.create(index=self.transactions_index_name,", "except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save balance:\", es1)", "helpers import time, json, datetime, os class elalog: def __init__(self,", "jsons:list): try: helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception", "as es1: print(\"Elastic exception on save balance:\", es1) def getLastEBlock(self):", "\"continent_name\": { \"type\": \"text\" }, \"country_iso_code\": { \"type\": \"text\" },", "self.blocks_index_name = \"blocks-\" + date self.block_mapping = { \"settings\": {", "self.es.search(index=\"blocks-\" + self.lastDate, doc_type=\"blocks-\" + self.lastDate, body=query) if not answer[\"aggregations\"][\"max_blnum\"][\"value\"]", "\"type\": \"long\" } } } } } if self.es.indices.exists(self.transactions_index_name): try:", "helpers.bulk(self.es, jsons) except elasticsearch.ElasticsearchException as es1: print(\"Elastic exception on save", "or '9200' self.lastDate = date self.es = Elasticsearch([{'host': es_host, 'port':", "\"settings\": { \"number_of_shards\": 5, \"number_of_replicas\": 0 }, \"mappings\": { \"balance\":" ]
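The shingles in the row that closes above cover an Elasticsearch logging helper (a class named elalog). They do not recover the whole file, but several methods are visible almost verbatim, so a partial de-shingled sketch is given here for readability. Indentation, the combined import line, the elasticBalanceHistory parameter, the users variable, and the wording of two error messages are assumptions and are flagged in comments; the index-mapping bodies are omitted.

import elasticsearch
from elasticsearch import Elasticsearch, helpers  # import form assumed; the row only shows "helpers"
import time, json, datetime, os

class elalog:
    def __init__(self, date):
        es_host = os.getenv("ES_PORT_9200_TCP_ADDR") or '<%ELASTICIP%>'
        es_port = os.getenv("ES_PORT_9200_TCP_PORT") or '9200'
        self.lastDate = date
        self.es = Elasticsearch([{'host': es_host, 'port': es_port}])
        # The original constructor also builds index names and mappings for
        # blocks, transactions, balance and clients; omitted in this sketch.

    def elasticClients(self, jsons: list):
        # Bulk-load client documents; message text is as it appears in the row.
        try:
            helpers.bulk(self.es, jsons)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on save Validators:", es1)
        print("Save Validators in elastic!")

    def elasticBlock(self, timestamp: float, validator: str, tcount: int,
                     signatures: list, hash: str, bheight: int):
        # Index a single block document into the per-date blocks index.
        index = 'blocks-' + self.lastDate
        estype = 'blocks-' + self.lastDate
        eljson = json.dumps({"@dtime": int(timestamp), "validator": validator,
                             "tcount": tcount, "signatures": list(signatures),
                             "hash": hash, "bheight": bheight}, separators=(',', ':'))
        try:
            self.es.index(index=str(index).lower(), doc_type=estype.lower(), body=eljson)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on save block:", es1)  # exact message not visible in the row

    def elasticBalanceHistory(self, balance: dict):  # parameter name/type assumed
        users = balance.keys()  # assumed; only "users" and balance.get(user) are visible
        jsonMas = []
        print("USER LEN:", len(users))
        for user in users:
            eljson = {"_index": "balance", "_type": "balance", "_id": user,
                      "_source": {"@dtime": int(time.time()), "user": user,
                                  "balance": balance.get(user)}}
            jsonMas.append(eljson)
        try:
            helpers.bulk(self.es, jsonMas)
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on save balance:", es1)

    def getLastEBlock(self):
        # Max-aggregation over bheight to find the highest indexed block.
        query = {"aggs": {"max_blnum": {"max": {"field": "bheight"}}}, "size": 0}
        try:
            answer = self.es.search(index="blocks-" + self.lastDate,
                                    doc_type="blocks-" + self.lastDate, body=query)
            if not answer["aggregations"]["max_blnum"]["value"] is None:  # condition partly assumed
                return int(answer["aggregations"]["max_blnum"]["value"])
            else:
                return 0
        except elasticsearch.ElasticsearchException as es1:
            print("Elastic exception on getLastEBlock:", es1)  # exact message not visible in the row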
[ "vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from util import", "ai ts=4 sts=4 et sw=4 encoding=utf-8 from util import clean_phone_number,", "util import clean_phone_number, clean_outgoing_sms_text from django.test import TestCase class UtilTestCase(TestCase):", "a test شسیبشسی\" cleaned = clean_outgoing_sms_text(text) # make sure '+'", "django.test import TestCase class UtilTestCase(TestCase): def setUp(self): pass def testCleanPhoneNumber(self):", "phone_number = \" 324 23-23421241\" cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\")", "clean_phone_number, clean_outgoing_sms_text from django.test import TestCase class UtilTestCase(TestCase): def setUp(self):", "setUp(self): pass def testCleanPhoneNumber(self): phone_number = \" 324 23-23421241\" cleaned", "TestCase class UtilTestCase(TestCase): def setUp(self): pass def testCleanPhoneNumber(self): phone_number =", "23-23421241\" cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self): text =", "ts=4 sts=4 et sw=4 encoding=utf-8 from util import clean_phone_number, clean_outgoing_sms_text", "clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self): text = u\"+this is a", "def testCleanOutgoingSMSText(self): text = u\"+this is a test شسیبشسی\" cleaned", "testCleanOutgoingSMSText(self): text = u\"+this is a test شسیبشسی\" cleaned =", "make sure '+' and unicode get encoded for GET properly", "self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self): text = u\"+this is a test", "= clean_outgoing_sms_text(text) # make sure '+' and unicode get encoded", "sts=4 et sw=4 encoding=utf-8 from util import clean_phone_number, clean_outgoing_sms_text from", "encoding=utf-8 from util import clean_phone_number, clean_outgoing_sms_text from django.test import TestCase", "شسیبشسی\" cleaned = clean_outgoing_sms_text(text) # make sure '+' and unicode", "cleaned = clean_outgoing_sms_text(text) # make sure '+' and unicode get", "324 23-23421241\" cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self): text", "from django.test import TestCase class UtilTestCase(TestCase): def setUp(self): pass def", "sure '+' and unicode get encoded for GET properly self.assertEquals(cleaned,", "python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from", "= \" 324 23-23421241\" cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def", "is a test شسیبشسی\" cleaned = clean_outgoing_sms_text(text) # make sure", "\" 324 23-23421241\" cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self):", "cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self): text = u\"+this", "sw=4 encoding=utf-8 from util import clean_phone_number, clean_outgoing_sms_text from django.test import", "# make sure '+' and unicode get encoded for GET", "\"+3242323421241\") def testCleanOutgoingSMSText(self): text = u\"+this is a test شسیبشسی\"", "def setUp(self): pass def testCleanPhoneNumber(self): phone_number = \" 324 23-23421241\"", "<reponame>dslowikowski/commcare-hq<filename>corehq/apps/sms/tests.py<gh_stars>1-10 #!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4", "def testCleanPhoneNumber(self): phone_number = \" 324 23-23421241\" cleaned = clean_phone_number(phone_number)", 
"text = u\"+this is a test شسیبشسی\" cleaned = clean_outgoing_sms_text(text)", "pass def testCleanPhoneNumber(self): phone_number = \" 324 23-23421241\" cleaned =", "et sw=4 encoding=utf-8 from util import clean_phone_number, clean_outgoing_sms_text from django.test", "#!/usr/bin/env python # vim: ai ts=4 sts=4 et sw=4 encoding=utf-8", "UtilTestCase(TestCase): def setUp(self): pass def testCleanPhoneNumber(self): phone_number = \" 324", "clean_outgoing_sms_text(text) # make sure '+' and unicode get encoded for", "import TestCase class UtilTestCase(TestCase): def setUp(self): pass def testCleanPhoneNumber(self): phone_number", "test شسیبشسی\" cleaned = clean_outgoing_sms_text(text) # make sure '+' and", "testCleanPhoneNumber(self): phone_number = \" 324 23-23421241\" cleaned = clean_phone_number(phone_number) self.assertEquals(cleaned,", "import clean_phone_number, clean_outgoing_sms_text from django.test import TestCase class UtilTestCase(TestCase): def", "= clean_phone_number(phone_number) self.assertEquals(cleaned, \"+3242323421241\") def testCleanOutgoingSMSText(self): text = u\"+this is", "'+' and unicode get encoded for GET properly self.assertEquals(cleaned, \"%2Bthis%20is%20a%20test%20%D8%B4%D8%B3%DB%8C%D8%A8%D8%B4%D8%B3%DB%8C\")", "u\"+this is a test شسیبشسی\" cleaned = clean_outgoing_sms_text(text) # make", "# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8 from util", "clean_outgoing_sms_text from django.test import TestCase class UtilTestCase(TestCase): def setUp(self): pass", "from util import clean_phone_number, clean_outgoing_sms_text from django.test import TestCase class", "= u\"+this is a test شسیبشسی\" cleaned = clean_outgoing_sms_text(text) #", "class UtilTestCase(TestCase): def setUp(self): pass def testCleanPhoneNumber(self): phone_number = \"" ]
[ "range(batch_size): atom_nbrs = F_b[i][7].get(atom, \"\") complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)", "= atom_types rp = [x for x in itertools.product(*radial)] frag1_X", "atom_j in enumerate(atom_nbrs): frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j] frag2_Nbrs", "atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): frag2_Nbrs_Z[i,", "weights. biases: tf.Variable Initialized biases. \"\"\" if weights is None:", "weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01) if biases is None: biases", "dtype=tf.float32) frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy", "list List of atoms recognized by model. Atoms are indicated", "weights is None: weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01) if biases", "range(num_layers): layer = tf.nn.bias_add( tf.matmul(prev_layer, self.type_weights[atomtype][i]), self.type_biases[atomtype][i]) layer = tf.nn.relu(layer)", "default_generator(self, dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True): batch_size = self.batch_size def", "-1 return np.array([place_holder(x) for x in z]) for epoch in", "complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs):", "complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy = [] frag2_atomtype_energy = []", "8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0 ],", "deterministic=True, pad_batches=True): batch_size = self.batch_size def replace_atom_types(z): def place_holder(i): if", "self.layer_sizes num_layers = len(layer_sizes) weight_init_stddevs = [1 / np.sqrt(x) for", "name='b') return w, b class AtomicConvScore(Layer): \"\"\"The scoring function used", "self.max_num_neighbors = max_num_neighbors self.batch_size = batch_size self.atom_types = atom_types rp", "convolutions. 
The difference is that the \"graph\" here is the", "b = tf.Variable(biases, name='b') return w, b class AtomicConvScore(Layer): \"\"\"The", "batch_size=batch_size, **kwargs) def default_generator(self, dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True): batch_size", "tf.nn.bias_add( tf.matmul(prev_layer, self.type_weights[atomtype][i]), self.type_biases[atomtype][i]) layer = tf.nn.relu(layer) prev_layer = layer", "np.zeros((batch_size, N_1, M)) frag1_Z_b = np.zeros((batch_size, N_1)) for i in", "complex_Z_b[i] = z complex_Nbrs_Z = np.zeros((batch_size, N, M)) for atom", "range(N_2): for i in range(batch_size): atom_nbrs = F_b[i][4].get(atom, \"\") frag2_Nbrs[i,", "in range(epochs): for ind, (F_b, y_b, w_b, ids_b) in enumerate(", "tf.zeros([size]) w = tf.Variable(weights, name='w') b = tf.Variable(biases, name='b') return", "10.5, 11.0, 11.5, 12.0 ], [0.0, 4.0, 8.0], [0.4]], layer_sizes=[32,", "__init__(self, frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6, 7., 8.,", "atom in range(N_2): for i in range(batch_size): atom_nbrs = F_b[i][4].get(atom,", "frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b, frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs,", "np.zeros((batch_size, N_2, M)) frag2_Z_b = np.zeros((batch_size, N_2)) for i in", "for x in itertools.product(*radial)] frag1_X = Input(shape=(frag1_num_atoms, 3)) frag1_nbrs =", "], outputs=score) super(AtomicConvModel, self).__init__( model, L2Loss(), batch_size=batch_size, **kwargs) def default_generator(self,", "= tf.map_fn(lambda x: atomnet(x, ind), frag1_layer) frag2_outputs = tf.map_fn(lambda x:", "[] frag2_atomtype_energy = [] complex_atomtype_energy = [] for ind, atomtype", "weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes] bias_init_consts", "= self.frag2_num_atoms M = self.max_num_neighbors batch_size = F_b.shape[0] num_features =", "this layer. weights: tf.Tensor, optional (Default None) Weight tensor. biases:", "5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5,", "for i in range(batch_size): frag2_X_b[i] = F_b[i][3] complex_X_b = np.zeros((batch_size,", "in range(batch_size): z = replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z frag2_Nbrs_Z =", "\"\"\" def __init__(self, frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6,", "20., 25., 30., 35., 53., -1. ], radial=[[ 1.5, 2.0,", "nodes in this layer. weights: tf.Tensor, optional (Default None) Weight", "= tf.reduce_sum(frag2_outputs, 1) complex_energy = tf.reduce_sum(complex_outputs, 1) binding_energy = complex_energy", "add description learning_rate: float Learning rate for the model. \"\"\"", "scoring function used by the atomic convolution models.\"\"\" def __init__(self,", "epochs=1, mode='fit', deterministic=True, pad_batches=True): batch_size = self.batch_size def replace_atom_types(z): def", "in range(batch_size): atom_nbrs = F_b[i][1].get(atom, \"\") frag1_Nbrs[i, atom, :len(atom_nbrs)] =", "15., 16., 17., 20., 25., 30., 35., 53., -1. 
],", "3)) frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_z", "frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros)) cond = tf.equal(frag2_z, atomtype) frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))", "frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z, complex_z", "inputs = [ frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b, frag2_Nbrs, frag2_Nbrs_Z,", "= tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy = tf.reduce_sum(frag2_outputs, 1)", "self).__init__(**kwargs) self.atom_types = atom_types self.layer_sizes = layer_sizes def build(self, input_shape):", "Atoms are indicated by their nuclear numbers. radial: list TODO:", "tf.Variable Initialized biases. \"\"\" if weights is None: weights =", "complex_atomtype_energy = [] for ind, atomtype in enumerate(atom_types): frag1_outputs =", "complex_Nbrs_Z = np.zeros((batch_size, N, M)) for atom in range(N): for", "] y_b = np.reshape(y_b, newshape=(batch_size, 1)) yield (inputs, [y_b], [w_b])", "weight, bias = initializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.random.truncated_normal( shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]),", "int Number of atoms in sec max_num_neighbors: int Maximum number", "tf.equal(complex_z, atomtype) complex_atomtype_energy.append( tf.where(cond, complex_outputs, complex_zeros)) frag1_outputs = tf.add_n(frag1_atomtype_energy) frag2_outputs", "place_holder(i): if i in self.atom_types: return i return -1 return", "1) self.output_weights[ind].append(weight) self.output_biases[ind].append(bias) def call(self, inputs): frag1_layer, frag2_layer, complex_layer, frag1_z,", "be used in a fully-connected layer. Parameters ---------- prev_layer_size: int", "frag1_zeros)) cond = tf.equal(frag2_z, atomtype) frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros)) cond =", "for epoch in range(epochs): for ind, (F_b, y_b, w_b, ids_b)", "optional (Default None) Bias tensor. name: str Name for this", "(F_b, y_b, w_b, ids_b) in enumerate( dataset.iterbatches( batch_size, deterministic=True, pad_batches=pad_batches)):", "atomnet(x, ind), frag1_layer) frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)", "possible for an atom. Recall neighbors are spatial neighbors. 
atom_types:", "enumerate(atom_nbrs): frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j] frag2_Nbrs = np.zeros((batch_size,", "= [] complex_atomtype_energy = [] for ind, atomtype in enumerate(atom_types):", "complex_energy = tf.reduce_sum(complex_outputs, 1) binding_energy = complex_energy - (frag1_energy +", "return tf.expand_dims(binding_energy, axis=1) class AtomicConvModel(KerasModel): \"\"\"Implements an Atomic Convolution Model.", "= tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy", "i in range(batch_size): frag2_X_b[i] = F_b[i][3] complex_X_b = np.zeros((batch_size, N,", "i in range(batch_size): atom_nbrs = F_b[i][4].get(atom, \"\") frag2_Nbrs[i, atom, :len(atom_nbrs)]", "j] = complex_Z_b[i, atom_j] inputs = [ frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z,", "the atomic convolution models.\"\"\" def __init__(self, atom_types, layer_sizes, **kwargs): super(AtomicConvScore,", "9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0 ], [0.0, 4.0,", "in range(N_1): for i in range(batch_size): atom_nbrs = F_b[i][1].get(atom, \"\")", "frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b, frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs, complex_Nbrs_Z,", "= tf.random.truncated_normal([prev_layer_size, size], stddev=0.01) if biases is None: biases =", "starting from the spatial geometry of the model. \"\"\" def", "shape=[layer_sizes[i]])) self.type_weights[ind].append(weight) self.type_biases[ind].append(bias) prev_layer_size = layer_sizes[i] weight, bias = initializeWeightsBiases(prev_layer_size,", "np.zeros((batch_size, N_1, num_features)) for i in range(batch_size): frag1_X_b[i] = F_b[i][0]", "frag2_atomtype_energy = [] complex_atomtype_energy = [] for ind, atomtype in", "Input(shape=(frag1_num_atoms,)) frag2_X = Input(shape=(frag2_num_atoms, 3)) frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_nbrs_z", "sys from deepchem.models import KerasModel from deepchem.models.layers import AtomicConvolution from", "used in a fully-connected layer. Parameters ---------- prev_layer_size: int Number", "itertools.product(*radial)] frag1_X = Input(shape=(frag1_num_atoms, 3)) frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z", "i in self.atom_types: return i return -1 return np.array([place_holder(x) for", "Name for this op, optional (Defaults to 'fully_connected' if None)", "layer_sizes = self.layer_sizes num_layers = len(layer_sizes) weight_init_stddevs = [1 /", "atom_types: list List of atoms recognized by model. Atoms are", "frag1_z, frag2_X, frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z, complex_z ],", "F_b[i][1].get(atom, \"\") frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j", "---------- prev_layer_size: int Number of features in previous layer. 
size:", "i in range(batch_size): z = replace_atom_types(F_b[i][8]) complex_Z_b[i] = z complex_Nbrs_Z", "biases=None, name=None): \"\"\"Initializes weights and biases to be used in", "len(layer_sizes) weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]", "Parameters ---------- frag1_num_atoms: int Number of atoms in first fragment", "self.output_biases = [] n_features = int(input_shape[0][-1]) layer_sizes = self.layer_sizes num_layers", "\"\") complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in", "score = AtomicConvScore(self.atom_types, layer_sizes)([ self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z, frag2_z, complex_z", "in range(batch_size): z = replace_atom_types(F_b[i][2]) frag1_Z_b[i] = z frag1_Nbrs_Z =", "= \"<NAME>\" __copyright__ = \"Copyright 2017, Stanford University\" __license__ =", "= [] self.output_biases = [] n_features = int(input_shape[0][-1]) layer_sizes =", "atom_types=[ 6, 7., 8., 9., 11., 12., 15., 16., 17.,", "range(N_1): for i in range(batch_size): atom_nbrs = F_b[i][1].get(atom, \"\") frag1_Nbrs[i,", "np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): frag1_Nbrs_Z[i, atom, j] =", "= self.atom_types num_layers = len(self.layer_sizes) def atomnet(current_input, atomtype): prev_layer =", "def default_generator(self, dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True): batch_size = self.batch_size", "atom_j] complex_Nbrs = np.zeros((batch_size, N, M)) complex_Z_b = np.zeros((batch_size, N))", "= frag1_Z_b[i, atom_j] frag2_Nbrs = np.zeros((batch_size, N_2, M)) frag2_Z_b =", "frag2_z, complex_z = inputs atom_types = self.atom_types num_layers = len(self.layer_sizes)", "self._complex_conv, frag1_z, frag2_z, complex_z ]) model = tf.keras.Model( inputs=[ frag1_X,", ":len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): complex_Nbrs_Z[i, atom,", "for ind, atomtype in enumerate(self.atom_types): prev_layer_size = n_features self.type_weights.append([]) self.type_biases.append([])", "self.frag2_num_atoms = frag2_num_atoms self.max_num_neighbors = max_num_neighbors self.batch_size = batch_size self.atom_types", "Turning off queue for now. Safe to re-activate? self.complex_num_atoms =", "Input(shape=(frag1_num_atoms, 3)) frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))", "Convolution Model. 
Implements the atomic convolutional networks as introduced in", "max_num_neighbors)) frag2_z = Input(shape=(frag2_num_atoms,)) complex_X = Input(shape=(complex_num_atoms, 3)) complex_nbrs =", "def call(self, inputs): frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z =", "frag1_z, frag2_z, complex_z = inputs atom_types = self.atom_types num_layers =", "M)) complex_Z_b = np.zeros((batch_size, N)) for i in range(batch_size): z", "x in layer_sizes] bias_init_consts = [0.0] * num_layers for ind,", "= Input(shape=(frag1_num_atoms, 3)) frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms,", "x in itertools.product(*radial)] frag1_X = Input(shape=(frag1_num_atoms, 3)) frag1_nbrs = Input(shape=(frag1_num_atoms,", "2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,", "tf.equal(frag2_z, atomtype) frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros)) cond = tf.equal(complex_z, atomtype) complex_atomtype_energy.append(", "= replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))", "self.output_biases[ind].append(bias) def call(self, inputs): frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z", "layer. size: int Number of nodes in this layer. weights:", "= Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_z = Input(shape=(frag1_num_atoms,))", "30., 35., 53., -1. ], radial=[[ 1.5, 2.0, 2.5, 3.0,", "= np.zeros((batch_size, N, M)) complex_Z_b = np.zeros((batch_size, N)) for i", "learning_rate: float Learning rate for the model. \"\"\" # TODO:", "frag1_Nbrs_Z = np.zeros((batch_size, N_1, M)) for atom in range(N_1): for", "np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): complex_Nbrs_Z[i, atom, j] =", "\"\"\"Initializes weights and biases to be used in a fully-connected", "features in previous layer. size: int Number of nodes in", "range(batch_size): atom_nbrs = F_b[i][4].get(atom, \"\") frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)", "for an atom. Recall neighbors are spatial neighbors. atom_types: list", "= Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_z = Input(shape=(frag1_num_atoms,)) frag2_X = Input(shape=(frag2_num_atoms, 3))", "complex_X_b, complex_Nbrs, complex_Nbrs_Z, complex_Z_b ] y_b = np.reshape(y_b, newshape=(batch_size, 1))", "= frag2_num_atoms self.max_num_neighbors = max_num_neighbors self.batch_size = batch_size self.atom_types =", "for i in range(batch_size): z = replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z", "difference is that the \"graph\" here is the nearest neighbors", "predict energetic state starting from the spatial geometry of the", "from deepchem.models.layers import AtomicConvolution from deepchem.models.losses import L2Loss from tensorflow.keras.layers", "self.layer_sizes = layer_sizes def build(self, input_shape): self.type_weights = [] self.type_biases", "input_shape): self.type_weights = [] self.type_biases = [] self.output_weights = []", "weights: tf.Tensor, optional (Default None) Weight tensor. 
biases: tf.Tensor, optional", "Number of atoms in sec max_num_neighbors: int Maximum number of", "Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_z = Input(shape=(frag2_num_atoms,)) complex_X", "frag1_num_atoms self.frag2_num_atoms = frag2_num_atoms self.max_num_neighbors = max_num_neighbors self.batch_size = batch_size", "frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer) complex_outputs = tf.map_fn(lambda", "num_features)) for i in range(batch_size): frag1_X_b[i] = F_b[i][0] frag2_X_b =", "numpy as np import tensorflow as tf import itertools def", "i in range(batch_size): complex_X_b[i] = F_b[i][6] frag1_Nbrs = np.zeros((batch_size, N_1,", "dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True): batch_size = self.batch_size def replace_atom_types(z):", "frag1_Z_b[i] = z frag1_Nbrs_Z = np.zeros((batch_size, N_1, M)) for atom", "tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy =", "is that the \"graph\" here is the nearest neighbors graph", "space to train models that learn to predict energetic state", "6, 7., 8., 9., 11., 12., 15., 16., 17., 20.,", "layer. Parameters ---------- prev_layer_size: int Number of features in previous", "biases: tf.Variable Initialized biases. \"\"\" if weights is None: weights", "in enumerate(atom_types): frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer) frag2_outputs", "frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy = tf.reduce_sum(frag2_outputs, 1) complex_energy =", "convolutional networks for predicting protein-ligand binding affinity.\" arXiv preprint arXiv:1703.10603", "5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0,", "rate for the model. \"\"\" # TODO: Turning off queue", "dataset.iterbatches( batch_size, deterministic=True, pad_batches=pad_batches)): N = self.complex_num_atoms N_1 = self.frag1_num_atoms", "N_1 = self.frag1_num_atoms N_2 = self.frag2_num_atoms M = self.max_num_neighbors batch_size", "for j, atom_j in enumerate(atom_nbrs): frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i,", "used by the atomic convolution models.\"\"\" def __init__(self, atom_types, layer_sizes,", "= AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z]) score = AtomicConvScore(self.atom_types,", "in enumerate(atom_nbrs): complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j] inputs =", "prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.random.truncated_normal( shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i], shape=[layer_sizes[i]])) self.type_weights[ind].append(weight)", "as tf import itertools def initializeWeightsBiases(prev_layer_size, size, weights=None, biases=None, name=None):", "that learn to predict energetic state starting from the spatial", "frag1_num_atoms: int Number of atoms in first fragment frag2_num_atoms: int", "frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j] frag2_Nbrs = np.zeros((batch_size, N_2,", "b class AtomicConvScore(Layer): \"\"\"The scoring function used by the atomic", "in range(batch_size): atom_nbrs = F_b[i][4].get(atom, \"\") frag2_Nbrs[i, atom, :len(atom_nbrs)] =", "pad_batches=pad_batches)): N = self.complex_num_atoms N_1 = self.frag1_num_atoms N_2 = self.frag2_num_atoms", "Number of features in previous layer. 
size: int Number of", "tf.reduce_sum(complex_outputs, 1) binding_energy = complex_energy - (frag1_energy + frag2_energy) return", "frag2_Z_b = np.zeros((batch_size, N_2)) for i in range(batch_size): z =", "self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z, frag2_z, complex_z ]) model = tf.keras.Model(", "Model. Implements the atomic convolutional networks as introduced in <NAME>", "complex_nbrs_z, complex_z ], outputs=score) super(AtomicConvModel, self).__init__( model, L2Loss(), batch_size=batch_size, **kwargs)", "outputs=score) super(AtomicConvModel, self).__init__( model, L2Loss(), batch_size=batch_size, **kwargs) def default_generator(self, dataset,", "frag2_zeros)) cond = tf.equal(complex_z, atomtype) complex_atomtype_energy.append( tf.where(cond, complex_outputs, complex_zeros)) frag1_outputs", "batch_size self.atom_types = atom_types rp = [x for x in", "range(num_layers): weight, bias = initializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.random.truncated_normal( shape=[prev_layer_size, layer_sizes[i]],", "None: weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01) if biases is None:", "radial_params=rp, boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z]) score = AtomicConvScore(self.atom_types, layer_sizes)([ self._frag1_conv, self._frag2_conv,", "np.zeros((batch_size, N_1, M)) for atom in range(N_1): for i in", "by model. Atoms are indicated by their nuclear numbers. radial:", "enumerate( dataset.iterbatches( batch_size, deterministic=True, pad_batches=pad_batches)): N = self.complex_num_atoms N_1 =", "= Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_z = Input(shape=(frag2_num_atoms,))", "= self.max_num_neighbors batch_size = F_b.shape[0] num_features = F_b[0][0].shape[1] frag1_X_b =", "def place_holder(i): if i in self.atom_types: return i return -1", "layer = tf.nn.bias_add( tf.matmul(prev_layer, self.type_weights[atomtype][i]), self.type_biases[atomtype][i]) layer = tf.nn.relu(layer) prev_layer", "j] = frag1_Z_b[i, atom_j] frag2_Nbrs = np.zeros((batch_size, N_2, M)) frag2_Z_b", "Input, Layer import numpy as np import tensorflow as tf", "ind), complex_layer) cond = tf.equal(frag1_z, atomtype) frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros)) cond", "(frag1_energy + frag2_energy) return tf.expand_dims(binding_energy, axis=1) class AtomicConvModel(KerasModel): \"\"\"Implements an", "the nearest neighbors graph in 3D space. The AtomicConvModel leverages", "= np.zeros((batch_size, N_1, num_features)) for i in range(batch_size): frag1_X_b[i] =", "complex_zeros)) frag1_outputs = tf.add_n(frag1_atomtype_energy) frag2_outputs = tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy)", "The difference is that the \"graph\" here is the nearest", "geometry of the model. 
\"\"\" def __init__(self, frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701,", "frag2_nbrs, frag2_nbrs_z]) self._complex_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])", "deterministic=True, pad_batches=pad_batches)): N = self.complex_num_atoms N_1 = self.frag1_num_atoms N_2 =", "z complex_Nbrs_Z = np.zeros((batch_size, N, M)) for atom in range(N):", "complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j] inputs = [ frag1_X_b,", "= Input(shape=(complex_num_atoms,)) self._frag1_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])", "N)) for i in range(batch_size): z = replace_atom_types(F_b[i][8]) complex_Z_b[i] =", "11.5, 12.0 ], [0.0, 4.0, 8.0], [0.4]], layer_sizes=[32, 32, 16],", "that the \"graph\" here is the nearest neighbors graph in", "replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z frag2_Nbrs_Z = np.zeros((batch_size, N_2, M)) for", "tensorflow as tf import itertools def initializeWeightsBiases(prev_layer_size, size, weights=None, biases=None,", "= [1 / np.sqrt(x) for x in layer_sizes] bias_init_consts =", "radial=[[ 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5,", "F_b.shape[0] num_features = F_b[0][0].shape[1] frag1_X_b = np.zeros((batch_size, N_1, num_features)) for", "\"Copyright 2017, Stanford University\" __license__ = \"MIT\" import sys from", "frag1_X_b = np.zeros((batch_size, N_1, num_features)) for i in range(batch_size): frag1_X_b[i]", "layer_sizes[i] weight, bias = initializeWeightsBiases(prev_layer_size, 1) self.output_weights[ind].append(weight) self.output_biases[ind].append(bias) def call(self,", "= self.batch_size def replace_atom_types(z): def place_holder(i): if i in self.atom_types:", "9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,", "M)) frag2_Z_b = np.zeros((batch_size, N_2)) for i in range(batch_size): z", "]) model = tf.keras.Model( inputs=[ frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X,", "11.0, 11.5, 12.0 ], [0.0, 4.0, 8.0], [0.4]], layer_sizes=[32, 32,", "Safe to re-activate? self.complex_num_atoms = complex_num_atoms self.frag1_num_atoms = frag1_num_atoms self.frag2_num_atoms", "complex_layer) cond = tf.equal(frag1_z, atomtype) frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros)) cond =", "8., 9., 11., 12., 15., 16., 17., 20., 25., 30.,", "max_num_neighbors: int Maximum number of neighbors possible for an atom.", "frag1_nbrs, frag1_nbrs_z]) self._frag2_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])", "= np.zeros((batch_size, N_2, num_features)) for i in range(batch_size): frag2_X_b[i] =", "nearest neighbors graph in 3D space. The AtomicConvModel leverages these", "\"MIT\" import sys from deepchem.models import KerasModel from deepchem.models.layers import", "return i return -1 return np.array([place_holder(x) for x in z])", "for atom in range(N_1): for i in range(batch_size): atom_nbrs =", "inputs=[ frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X,", "= [x for x in itertools.product(*radial)] frag1_X = Input(shape=(frag1_num_atoms, 3))", "None) Weight tensor. 
biases: tf.Tensor, optional (Default None) Bias tensor.", "tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy = [] frag2_atomtype_energy", "atomtype in enumerate(atom_types): frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)", "frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs atom_types = self.atom_types", "tf.Variable Initialized weights. biases: tf.Variable Initialized biases. \"\"\" if weights", "N_1, num_features)) for i in range(batch_size): frag1_X_b[i] = F_b[i][0] frag2_X_b", "ind, (F_b, y_b, w_b, ids_b) in enumerate( dataset.iterbatches( batch_size, deterministic=True,", "complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer) cond = tf.equal(frag1_z,", "tf.reduce_sum(frag1_outputs, 1) frag2_energy = tf.reduce_sum(frag2_outputs, 1) complex_energy = tf.reduce_sum(complex_outputs, 1)", "= initializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.random.truncated_normal( shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i],", "preprint arXiv:1703.10603 (2017). The atomic convolutional networks function as a", "weight, bias = initializeWeightsBiases(prev_layer_size, 1) self.output_weights[ind].append(weight) self.output_biases[ind].append(bias) def call(self, inputs):", "12.0 ], [0.0, 4.0, 8.0], [0.4]], layer_sizes=[32, 32, 16], learning_rate=0.001,", "= F_b[i][0] frag2_X_b = np.zeros((batch_size, N_2, num_features)) for i in", "int(input_shape[0][-1]) layer_sizes = self.layer_sizes num_layers = len(layer_sizes) weight_init_stddevs = [1", "from deepchem.models import KerasModel from deepchem.models.layers import AtomicConvolution from deepchem.models.losses", "self.type_biases.append([]) self.output_weights.append([]) self.output_biases.append([]) for i in range(num_layers): weight, bias =", "model. \"\"\" def __init__(self, frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[", "complex_X_b = np.zeros((batch_size, N, num_features)) for i in range(batch_size): complex_X_b[i]", "---------- frag1_num_atoms: int Number of atoms in first fragment frag2_num_atoms:", "Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_z = Input(shape=(frag2_num_atoms,)) complex_X = Input(shape=(complex_num_atoms, 3)) complex_nbrs", "layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i], shape=[layer_sizes[i]])) self.type_weights[ind].append(weight) self.type_biases[ind].append(bias) prev_layer_size = layer_sizes[i]", "N, num_features)) for i in range(batch_size): complex_X_b[i] = F_b[i][6] frag1_Nbrs", "TODO: add description layer_sizes: list TODO: add description learning_rate: float", "self._frag2_conv = AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z]) self._complex_conv =", "def atomnet(current_input, atomtype): prev_layer = current_input for i in range(num_layers):", "neighbors are spatial neighbors. atom_types: list List of atoms recognized", "tf.expand_dims(binding_energy, axis=1) class AtomicConvModel(KerasModel): \"\"\"Implements an Atomic Convolution Model. Implements", "tensorflow.keras.layers import Input, Layer import numpy as np import tensorflow", "name=None): \"\"\"Initializes weights and biases to be used in a", "biases. 
\"\"\" if weights is None: weights = tf.random.truncated_normal([prev_layer_size, size],", "3D space. The AtomicConvModel leverages these connections in 3D space", "indicated by their nuclear numbers. radial: list TODO: add description", "6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5,", "\"Atomic convolutional networks for predicting protein-ligand binding affinity.\" arXiv preprint", "\"<NAME>\" __copyright__ = \"Copyright 2017, Stanford University\" __license__ = \"MIT\"", "op, optional (Defaults to 'fully_connected' if None) Returns ------- weights:", "batch_size=24, atom_types=[ 6, 7., 8., 9., 11., 12., 15., 16.,", "self.output_weights.append([]) self.output_biases.append([]) for i in range(num_layers): weight, bias = initializeWeightsBiases(", "np.zeros((batch_size, N_2)) for i in range(batch_size): z = replace_atom_types(F_b[i][5]) frag2_Z_b[i]", "1) frag2_energy = tf.reduce_sum(frag2_outputs, 1) complex_energy = tf.reduce_sum(complex_outputs, 1) binding_energy", "-1. ], radial=[[ 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,", "complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6, 7., 8., 9., 11., 12.,", "range(N): for i in range(batch_size): atom_nbrs = F_b[i][7].get(atom, \"\") complex_Nbrs[i,", "tensor. name: str Name for this op, optional (Defaults to", "__author__ = \"<NAME>\" __copyright__ = \"Copyright 2017, Stanford University\" __license__", "frag2_Nbrs = np.zeros((batch_size, N_2, M)) frag2_Z_b = np.zeros((batch_size, N_2)) for", "networks for predicting protein-ligand binding affinity.\" arXiv preprint arXiv:1703.10603 (2017).", "return -1 return np.array([place_holder(x) for x in z]) for epoch", "self.type_biases = [] self.output_weights = [] self.output_biases = [] n_features", "= tf.map_fn(lambda x: atomnet(x, ind), frag2_layer) complex_outputs = tf.map_fn(lambda x:", "if i in self.atom_types: return i return -1 return np.array([place_holder(x)", "__init__(self, atom_types, layer_sizes, **kwargs): super(AtomicConvScore, self).__init__(**kwargs) self.atom_types = atom_types self.layer_sizes", "= tf.squeeze( tf.nn.bias_add( tf.matmul(prev_layer, self.output_weights[atomtype][0]), self.output_biases[atomtype][0])) return output_layer frag1_zeros =", "AtomicConvolution from deepchem.models.losses import L2Loss from tensorflow.keras.layers import Input, Layer", "description layer_sizes: list TODO: add description learning_rate: float Learning rate", "np import tensorflow as tf import itertools def initializeWeightsBiases(prev_layer_size, size,", "frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors)) frag2_z =", "z frag2_Nbrs_Z = np.zeros((batch_size, N_2, M)) for atom in range(N_2):", "graph convolutions. 
The difference is that the \"graph\" here is", "Input(shape=(complex_num_atoms, max_num_neighbors)) complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors)) complex_z = Input(shape=(complex_num_atoms,)) self._frag1_conv", "\"\"\" if weights is None: weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)", "frag2_X, frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z, complex_z ], outputs=score)", "frag1_Z_b = np.zeros((batch_size, N_1)) for i in range(batch_size): z =", "complex_X_b[i] = F_b[i][6] frag1_Nbrs = np.zeros((batch_size, N_1, M)) frag1_Z_b =", "of atoms in first fragment frag2_num_atoms: int Number of atoms", "= layer_sizes def build(self, input_shape): self.type_weights = [] self.type_biases =", "atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j, atom_j in enumerate(atom_nbrs): frag1_Nbrs_Z[i,", "= tf.Variable(weights, name='w') b = tf.Variable(biases, name='b') return w, b", "1) binding_energy = complex_energy - (frag1_energy + frag2_energy) return tf.expand_dims(binding_energy,", "biases = tf.zeros([size]) w = tf.Variable(weights, name='w') b = tf.Variable(biases,", "atom, j] = frag1_Z_b[i, atom_j] frag2_Nbrs = np.zeros((batch_size, N_2, M))", "space. The AtomicConvModel leverages these connections in 3D space to", "binding affinity.\" arXiv preprint arXiv:1703.10603 (2017). The atomic convolutional networks", "frag1_outputs = tf.add_n(frag1_atomtype_energy) frag2_outputs = tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy", "num_layers = len(layer_sizes) weight_init_stddevs = [1 / np.sqrt(x) for x", "stddev=0.01) if biases is None: biases = tf.zeros([size]) w =", "frag1_num_atoms=70, frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6, 7., 8., 9.,", "neighbors possible for an atom. 
Recall neighbors are spatial neighbors.", "self.atom_types = atom_types rp = [x for x in itertools.product(*radial)]", "= F_b[i][7].get(atom, \"\") complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for j,", "= tf.map_fn(lambda x: atomnet(x, ind), complex_layer) cond = tf.equal(frag1_z, atomtype)", "return np.array([place_holder(x) for x in z]) for epoch in range(epochs):", "frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z, complex_z ], outputs=score) super(AtomicConvModel,", "range(batch_size): complex_X_b[i] = F_b[i][6] frag1_Nbrs = np.zeros((batch_size, N_1, M)) frag1_Z_b", "= tf.reduce_sum(complex_outputs, 1) binding_energy = complex_energy - (frag1_energy + frag2_energy)", "complex_Z_b[i, atom_j] inputs = [ frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b,", "in sec max_num_neighbors: int Maximum number of neighbors possible for", "complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1) frag2_energy = tf.reduce_sum(frag2_outputs,", "for this op, optional (Defaults to 'fully_connected' if None) Returns", "max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_z = Input(shape=(frag1_num_atoms,)) frag2_X =", "N, M)) for atom in range(N): for i in range(batch_size):", "from deepchem.models.losses import L2Loss from tensorflow.keras.layers import Input, Layer import", "for x in z]) for epoch in range(epochs): for ind,", "self.output_weights = [] self.output_biases = [] n_features = int(input_shape[0][-1]) layer_sizes", "self.atom_types num_layers = len(self.layer_sizes) def atomnet(current_input, atomtype): prev_layer = current_input", "in this layer. weights: tf.Tensor, optional (Default None) Weight tensor.", "= tf.add_n(frag1_atomtype_energy) frag2_outputs = tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy =", "9.5, 10.0, 10.5, 11.0, 11.5, 12.0 ], [0.0, 4.0, 8.0],", "in first fragment frag2_num_atoms: int Number of atoms in sec", "these connections in 3D space to train models that learn", "list TODO: add description learning_rate: float Learning rate for the", "prev_layer = current_input for i in range(num_layers): layer = tf.nn.bias_add(", "in enumerate(atom_nbrs): frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j] complex_Nbrs =", "tf.random.truncated_normal([prev_layer_size, size], stddev=0.01) if biases is None: biases = tf.zeros([size])", "\"\"\"Implements an Atomic Convolution Model. Implements the atomic convolutional networks", "np.zeros((batch_size, N, num_features)) for i in range(batch_size): complex_X_b[i] = F_b[i][6]", "import L2Loss from tensorflow.keras.layers import Input, Layer import numpy as", "atoms in first fragment frag2_num_atoms: int Number of atoms in", "N, M)) complex_Z_b = np.zeros((batch_size, N)) for i in range(batch_size):", "frag1_atomtype_energy = [] frag2_atomtype_energy = [] complex_atomtype_energy = [] for", "frag2_num_atoms=634, complex_num_atoms=701, max_num_neighbors=12, batch_size=24, atom_types=[ 6, 7., 8., 9., 11.,", "6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,", "atom. Recall neighbors are spatial neighbors. atom_types: list List of", "import AtomicConvolution from deepchem.models.losses import L2Loss from tensorflow.keras.layers import Input,", "complex_Z_b ] y_b = np.reshape(y_b, newshape=(batch_size, 1)) yield (inputs, [y_b],", "here is the nearest neighbors graph in 3D space. 
The", "binding_energy = complex_energy - (frag1_energy + frag2_energy) return tf.expand_dims(binding_energy, axis=1)", "to predict energetic state starting from the spatial geometry of", "32, 16], learning_rate=0.001, **kwargs): \"\"\" Parameters ---------- frag1_num_atoms: int Number", "frag1_Nbrs_Z, frag1_Z_b, frag2_X_b, frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs, complex_Nbrs_Z, complex_Z_b", "max_num_neighbors self.batch_size = batch_size self.atom_types = atom_types rp = [x", "frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors)) frag1_z =", "in layer_sizes] bias_init_consts = [0.0] * num_layers for ind, atomtype", "atoms recognized by model. Atoms are indicated by their nuclear", "to re-activate? self.complex_num_atoms = complex_num_atoms self.frag1_num_atoms = frag1_num_atoms self.frag2_num_atoms =", "complex_layer, frag1_z, frag2_z, complex_z = inputs atom_types = self.atom_types num_layers", "initializeWeightsBiases(prev_layer_size, size, weights=None, biases=None, name=None): \"\"\"Initializes weights and biases to", "= tf.zeros([size]) w = tf.Variable(weights, name='w') b = tf.Variable(biases, name='b')", "return output_layer frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32) frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)", "Number of atoms in first fragment frag2_num_atoms: int Number of", "= batch_size self.atom_types = atom_types rp = [x for x", "= tf.zeros_like(frag1_z, dtype=tf.float32) frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros = tf.zeros_like(complex_z,", "shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i], shape=[layer_sizes[i]])) self.type_weights[ind].append(weight) self.type_biases[ind].append(bias) prev_layer_size =", "tf.Variable(biases, name='b') return w, b class AtomicConvScore(Layer): \"\"\"The scoring function", "np.sqrt(x) for x in layer_sizes] bias_init_consts = [0.0] * num_layers", "atom, j] = frag2_Z_b[i, atom_j] complex_Nbrs = np.zeros((batch_size, N, M))", "deepchem.models import KerasModel from deepchem.models.layers import AtomicConvolution from deepchem.models.losses import", "range(batch_size): z = replace_atom_types(F_b[i][5]) frag2_Z_b[i] = z frag2_Nbrs_Z = np.zeros((batch_size,", "= tf.equal(complex_z, atomtype) complex_atomtype_energy.append( tf.where(cond, complex_outputs, complex_zeros)) frag1_outputs = tf.add_n(frag1_atomtype_energy)", "complex_Nbrs = np.zeros((batch_size, N, M)) complex_Z_b = np.zeros((batch_size, N)) for", "initializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i], weights=tf.random.truncated_normal( shape=[prev_layer_size, layer_sizes[i]], stddev=weight_init_stddevs[i]), biases=tf.constant( value=bias_init_consts[i], shape=[layer_sizes[i]]))", "N_2, num_features)) for i in range(batch_size): frag2_X_b[i] = F_b[i][3] complex_X_b", "None) Returns ------- weights: tf.Variable Initialized weights. 
biases: tf.Variable Initialized", "atom_nbrs = F_b[i][4].get(atom, \"\") frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for", "L2Loss(), batch_size=batch_size, **kwargs) def default_generator(self, dataset, epochs=1, mode='fit', deterministic=True, pad_batches=True):", "[0.0] * num_layers for ind, atomtype in enumerate(self.atom_types): prev_layer_size =", "complex_z = inputs atom_types = self.atom_types num_layers = len(self.layer_sizes) def", "frag2_outputs = tf.add_n(frag2_atomtype_energy) complex_outputs = tf.add_n(complex_atomtype_energy) frag1_energy = tf.reduce_sum(frag1_outputs, 1)", "self).__init__( model, L2Loss(), batch_size=batch_size, **kwargs) def default_generator(self, dataset, epochs=1, mode='fit',", "AtomicConvolution( atom_types=self.atom_types, radial_params=rp, boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z]) self._complex_conv = AtomicConvolution( atom_types=self.atom_types,", "= F_b[0][0].shape[1] frag1_X_b = np.zeros((batch_size, N_1, num_features)) for i in", "in range(batch_size): complex_X_b[i] = F_b[i][6] frag1_Nbrs = np.zeros((batch_size, N_1, M))", "self.complex_num_atoms N_1 = self.frag1_num_atoms N_2 = self.frag2_num_atoms M = self.max_num_neighbors", "i in range(batch_size): atom_nbrs = F_b[i][1].get(atom, \"\") frag1_Nbrs[i, atom, :len(atom_nbrs)]", "a fully-connected layer. Parameters ---------- prev_layer_size: int Number of features", "N_2 = self.frag2_num_atoms M = self.max_num_neighbors batch_size = F_b.shape[0] num_features", "[] self.type_biases = [] self.output_weights = [] self.output_biases = []", "cond = tf.equal(complex_z, atomtype) complex_atomtype_energy.append( tf.where(cond, complex_outputs, complex_zeros)) frag1_outputs =", "= [ frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b, frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b,", "size], stddev=0.01) if biases is None: biases = tf.zeros([size]) w", "atomtype) frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros)) cond = tf.equal(frag2_z, atomtype) frag2_atomtype_energy.append(tf.where(cond, frag2_outputs,", "tf import itertools def initializeWeightsBiases(prev_layer_size, size, weights=None, biases=None, name=None): \"\"\"Initializes", "for i in range(num_layers): weight, bias = initializeWeightsBiases( prev_layer_size=prev_layer_size, size=layer_sizes[i],", "atom_nbrs = F_b[i][1].get(atom, \"\") frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs) for", "layer_sizes: list TODO: add description learning_rate: float Learning rate for", "self.type_weights[ind].append(weight) self.type_biases[ind].append(bias) prev_layer_size = layer_sizes[i] weight, bias = initializeWeightsBiases(prev_layer_size, 1)", "for i in range(batch_size): z = replace_atom_types(F_b[i][2]) frag1_Z_b[i] = z", "= tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy = [] frag2_atomtype_energy = [] complex_atomtype_energy", "complex_Z_b = np.zeros((batch_size, N)) for i in range(batch_size): z =", "frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32) frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros =", "complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors)) complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors)) complex_z =", "frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32) complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32) frag1_atomtype_energy =", "recognized by model. Atoms are indicated by their nuclear numbers.", "layer. weights: tf.Tensor, optional (Default None) Weight tensor. 
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"

import sys

from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Layer

import numpy as np
import tensorflow as tf
import itertools


def initializeWeightsBiases(prev_layer_size,
                            size,
                            weights=None,
                            biases=None,
                            name=None):
    """Initializes weights and biases to be used in a fully-connected layer.

    Parameters
    ----------
    prev_layer_size: int
        Number of features in previous layer.
    size: int
        Number of nodes in this layer.
    weights: tf.Tensor, optional (Default None)
        Weight tensor.
    biases: tf.Tensor, optional (Default None)
        Bias tensor.
    name: str
        Name for this op, optional (Defaults to 'fully_connected' if None)

    Returns
    -------
    weights: tf.Variable
        Initialized weights.
    biases: tf.Variable
        Initialized biases.
    """
    if weights is None:
        weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)
    if biases is None:
        biases = tf.zeros([size])

    w = tf.Variable(weights, name='w')
    b = tf.Variable(biases, name='b')
    return w, b
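A quick, illustrative check of the helper above; this snippet is not part of the original module, and the sizes are arbitrary. It assumes TensorFlow 2.x eager execution and that initializeWeightsBiases is in scope as defined above:

# Build a 16 -> 8 fully-connected parameter pair with the default initializers.
w, b = initializeWeightsBiases(prev_layer_size=16, size=8)
print(w.shape)  # (16, 8): truncated-normal weights with stddev 0.01
print(b.shape)  # (8,): zero-initialized biases

The scoring layer that consumes these parameters follows.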
class AtomicConvScore(Layer):
    """The scoring function used by the atomic convolution models."""

    def __init__(self, atom_types, layer_sizes, **kwargs):
        super(AtomicConvScore, self).__init__(**kwargs)
        self.atom_types = atom_types
        self.layer_sizes = layer_sizes

    def build(self, input_shape):
        self.type_weights = []
        self.type_biases = []
        self.output_weights = []
        self.output_biases = []
        n_features = int(input_shape[0][-1])
        layer_sizes = self.layer_sizes
        num_layers = len(layer_sizes)
        weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
        bias_init_consts = [0.0] * num_layers
        for ind, atomtype in enumerate(self.atom_types):
            prev_layer_size = n_features
            self.type_weights.append([])
            self.type_biases.append([])
            self.output_weights.append([])
            self.output_biases.append([])
            for i in range(num_layers):
                weight, bias = initializeWeightsBiases(
                    prev_layer_size=prev_layer_size,
                    size=layer_sizes[i],
                    weights=tf.random.truncated_normal(
                        shape=[prev_layer_size, layer_sizes[i]],
                        stddev=weight_init_stddevs[i]),
                    biases=tf.constant(
                        value=bias_init_consts[i], shape=[layer_sizes[i]]))
                self.type_weights[ind].append(weight)
                self.type_biases[ind].append(bias)
                prev_layer_size = layer_sizes[i]
            weight, bias = initializeWeightsBiases(prev_layer_size, 1)
            self.output_weights[ind].append(weight)
            self.output_biases[ind].append(bias)

    def call(self, inputs):
        frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
        atom_types = self.atom_types
        num_layers = len(self.layer_sizes)

        def atomnet(current_input, atomtype):
            prev_layer = current_input
            for i in range(num_layers):
                layer = tf.nn.bias_add(
                    tf.matmul(prev_layer, self.type_weights[atomtype][i]),
                    self.type_biases[atomtype][i])
                layer = tf.nn.relu(layer)
                prev_layer = layer

            output_layer = tf.squeeze(
                tf.nn.bias_add(
                    tf.matmul(prev_layer, self.output_weights[atomtype][0]),
                    self.output_biases[atomtype][0]))
            return output_layer

        frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
        frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
        complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)

        frag1_atomtype_energy = []
        frag2_atomtype_energy = []
        complex_atomtype_energy = []

        for ind, atomtype in enumerate(atom_types):
            frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
            frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
            complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)

            cond = tf.equal(frag1_z, atomtype)
            frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
            cond = tf.equal(frag2_z, atomtype)
            frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
            cond = tf.equal(complex_z, atomtype)
            complex_atomtype_energy.append(
                tf.where(cond, complex_outputs, complex_zeros))

        frag1_outputs = tf.add_n(frag1_atomtype_energy)
        frag2_outputs = tf.add_n(frag2_atomtype_energy)
        complex_outputs = tf.add_n(complex_atomtype_energy)

        frag1_energy = tf.reduce_sum(frag1_outputs, 1)
        frag2_energy = tf.reduce_sum(frag2_outputs, 1)
        complex_energy = tf.reduce_sum(complex_outputs, 1)
        binding_energy = complex_energy - (frag1_energy + frag2_energy)
        return tf.expand_dims(binding_energy, axis=1)
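The reduction at the end of call() computes binding_energy = complex_energy - (frag1_energy + frag2_energy), where each energy is the sum of per-atom contributions over one fragment or over the whole complex. A toy NumPy calculation with made-up per-atom energies illustrates the bookkeeping; the numbers are purely illustrative and not from the original file:

import numpy as np

frag1_energy = np.sum([-1.0, -0.5])           # -1.5, ligand-side atoms
frag2_energy = np.sum([-2.0])                 # -2.0, protein-fragment atoms
complex_energy = np.sum([-1.2, -0.7, -2.4])   # -4.3, atoms of the bound complex
binding_energy = complex_energy - (frag1_energy + frag2_energy)
print(binding_energy)                         # about -0.8 (up to float rounding)

The model class that wires this scoring layer to the atomic convolution layers follows.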
class AtomicConvModel(KerasModel):
    """Implements an Atomic Convolution Model.

    Implements the atomic convolutional networks as introduced in
    <NAME> al. "Atomic convolutional networks for predicting protein-ligand
    binding affinity." arXiv preprint arXiv:1703.10603 (2017).

    The atomic convolutional networks function as a variant of graph
    convolutions. The difference is that the "graph" here is the nearest
    neighbors graph in 3D space. The AtomicConvModel leverages these
    connections in 3D space to train models that learn to predict energetic
    state starting from the spatial geometry of the model.
    """

    def __init__(self,
                 frag1_num_atoms=70,
                 frag2_num_atoms=634,
                 complex_num_atoms=701,
                 max_num_neighbors=12,
                 batch_size=24,
                 atom_types=[
                     6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30.,
                     35., 53., -1.
                 ],
                 radial=[[
                     1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5,
                     7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
                 ], [0.0, 4.0, 8.0], [0.4]],
                 layer_sizes=[32, 32, 16],
                 learning_rate=0.001,
                 **kwargs):
        """
        Parameters
        ----------
        frag1_num_atoms: int
            Number of atoms in first fragment
        frag2_num_atoms: int
            Number of atoms in second fragment
        max_num_neighbors: int
            Maximum number of neighbors possible for an atom. Recall neighbors
            are spatial neighbors.
        atom_types: list
            List of atoms recognized by model. Atoms are indicated by their
            nuclear numbers.
        radial: list
            TODO: add description
        layer_sizes: list
            TODO: add description
        learning_rate: float
            Learning rate for the model.
        """
        # TODO: Turning off queue for now. Safe to re-activate?
        self.complex_num_atoms = complex_num_atoms
        self.frag1_num_atoms = frag1_num_atoms
        self.frag2_num_atoms = frag2_num_atoms
        self.max_num_neighbors = max_num_neighbors
        self.batch_size = batch_size
        self.atom_types = atom_types

        rp = [x for x in itertools.product(*radial)]

        frag1_X = Input(shape=(frag1_num_atoms, 3))
        frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors))
        frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))
        frag1_z = Input(shape=(frag1_num_atoms,))

        frag2_X = Input(shape=(frag2_num_atoms, 3))
        frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors))
        frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors))
        frag2_z = Input(shape=(frag2_num_atoms,))

        complex_X = Input(shape=(complex_num_atoms, 3))
        complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors))
        complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors))
        complex_z = Input(shape=(complex_num_atoms,))

        self._frag1_conv = AtomicConvolution(
            atom_types=self.atom_types, radial_params=rp,
            boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])
        self._frag2_conv = AtomicConvolution(
            atom_types=self.atom_types, radial_params=rp,
            boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])
        self._complex_conv = AtomicConvolution(
            atom_types=self.atom_types, radial_params=rp,
            boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])

        score = AtomicConvScore(self.atom_types, layer_sizes)([
            self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z,
            frag2_z, complex_z
        ])

        model = tf.keras.Model(
            inputs=[
                frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X,
                frag2_nbrs, frag2_nbrs_z, frag2_z, complex_X, complex_nbrs,
                complex_nbrs_z, complex_z
            ],
            outputs=score)
        super(AtomicConvModel, self).__init__(
            model, L2Loss(), batch_size=batch_size, **kwargs)

    def default_generator(self,
                          dataset,
                          epochs=1,
                          mode='fit',
                          deterministic=True,
                          pad_batches=True):
        batch_size = self.batch_size

        def replace_atom_types(z):

            def place_holder(i):
                if i in self.atom_types:
                    return i
                return -1

            return np.array([place_holder(x) for x in z])

        for epoch in range(epochs):
            for ind, (F_b, y_b, w_b, ids_b) in enumerate(
                    dataset.iterbatches(
                        batch_size, deterministic=True,
                        pad_batches=pad_batches)):
                N = self.complex_num_atoms
                N_1 = self.frag1_num_atoms
                N_2 = self.frag2_num_atoms
                M = self.max_num_neighbors

                batch_size = F_b.shape[0]
                num_features = F_b[0][0].shape[1]

                frag1_X_b = np.zeros((batch_size, N_1, num_features))
                for i in range(batch_size):
                    frag1_X_b[i] = F_b[i][0]

                frag2_X_b = np.zeros((batch_size, N_2, num_features))
                for i in range(batch_size):
                    frag2_X_b[i] = F_b[i][3]

                complex_X_b = np.zeros((batch_size, N, num_features))
                for i in range(batch_size):
                    complex_X_b[i] = F_b[i][6]

                frag1_Nbrs = np.zeros((batch_size, N_1, M))
                frag1_Z_b = np.zeros((batch_size, N_1))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][2])
                    frag1_Z_b[i] = z
                frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
                for atom in range(N_1):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][1].get(atom, "")
                        frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]

                frag2_Nbrs = np.zeros((batch_size, N_2, M))
                frag2_Z_b = np.zeros((batch_size, N_2))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][5])
                    frag2_Z_b[i] = z
                frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
                for atom in range(N_2):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][4].get(atom, "")
                        frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]

                complex_Nbrs = np.zeros((batch_size, N, M))
                complex_Z_b = np.zeros((batch_size, N))
                for i in range(batch_size):
                    z = replace_atom_types(F_b[i][8])
                    complex_Z_b[i] = z
                complex_Nbrs_Z = np.zeros((batch_size, N, M))
                for atom in range(N):
                    for i in range(batch_size):
                        atom_nbrs = F_b[i][7].get(atom, "")
                        complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
                        for j, atom_j in enumerate(atom_nbrs):
                            complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]

                inputs = [
                    frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b,
                    frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b,
                    complex_Nbrs, complex_Nbrs_Z, complex_Z_b
                ]
                y_b = np.reshape(y_b, newshape=(batch_size, 1))
                yield (inputs, [y_b], [w_b])
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest

from programy.config.file.yaml_file import YamlConfigurationFile
from programy.config.brain.oob import BrainOOBConfiguration
from programy.clients.events.console.config import ConsoleConfiguration


class BrainOOBConfigurationTests(unittest.TestCase):

    def test_oob_with_data(self):
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        brain:
            oobs:
                default:
                    classname: programy.oob.defaults.default.DefaultOutOfBandProcessor
        """, ConsoleConfiguration(), ".")

        brain_config = yaml.get_section("brain")
        self.assertIsNotNone(brain_config)
        oobs_config = yaml.get_section("oobs", brain_config)
        self.assertIsNotNone(oobs_config)
        oob_config = BrainOOBConfiguration("default")
        oob_config.load_config_section(yaml, oobs_config, ".")
        self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor",
                         oob_config.classname)

    def test_default_without_data(self):
        yaml = YamlConfigurationFile()
        self.assertIsNotNone(yaml)
        yaml.load_from_text("""
        brain:
            oobs:
                default:
        """, ConsoleConfiguration(), ".")

        brain_config = yaml.get_section("brain")
        self.assertIsNotNone(brain_config)
        oobs_config = yaml.get_section("oobs", brain_config)
        self.assertIsNotNone(oobs_config)
        oob_config = BrainOOBConfiguration("default")
        oob_config.load_config_section(yaml, oobs_config, ".")
        self.assertIsNone(oob_config.classname)
Permission is hereby granted, free", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "free of charge, to any person obtaining a copy of", "brain: oobs: default: \"\"\", ConsoleConfiguration(), \".\") brain_config = yaml.get_section(\"brain\") self.assertIsNotNone(brain_config)", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "the Software is furnished to do so, subject to the", "Software, and to permit persons to whom the Software is", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname) def", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "default: classname: programy.oob.defaults.default.DefaultOutOfBandProcessor \"\"\", ConsoleConfiguration(), \".\") brain_config = yaml.get_section(\"brain\") self.assertIsNotNone(brain_config)", "oob_config = BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname) def test_default_without_data(self):", "modify, merge, publish, distribute, sublicense, and/or sell copies of the", "yaml.get_section(\"oobs\", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\",", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "copy of this software and associated documentation files (the \"Software\"),", "persons to whom the Software is furnished to do so,", "limitation the rights to use, copy, modify, merge, publish, distribute,", "subject to the following conditions: The above copyright notice and", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "\"\"\" import unittest from programy.config.file.yaml_file import YamlConfigurationFile from programy.config.brain.oob import", "brain: oobs: default: classname: programy.oob.defaults.default.DefaultOutOfBandProcessor \"\"\", ConsoleConfiguration(), \".\") brain_config =", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "including without limitation the rights to use, copy, modify, merge,", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "documentation files (the \"Software\"), to deal in the Software without", "programy.config.file.yaml_file import YamlConfigurationFile from programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import", "copyright notice and this permission notice shall be included in", "BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname) def test_default_without_data(self): yaml =", "from programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase):", "programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml = YamlConfigurationFile()", "without restriction, including without limitation the rights to use, copy,", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\"", "= yaml.get_section(\"oobs\", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\")", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs: default: \"\"\", ConsoleConfiguration(), \".\") brain_config", "ConsoleConfiguration(), \".\") brain_config = yaml.get_section(\"brain\") self.assertIsNotNone(brain_config) oobs_config = yaml.get_section(\"oobs\", brain_config)", "unittest from programy.config.file.yaml_file import YamlConfigurationFile from programy.config.brain.oob import BrainOOBConfiguration from", "self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs: default: \"\"\", ConsoleConfiguration(), \".\") brain_config =", "from programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml =", "and to permit persons to whom the Software is furnished", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" import unittest from", "copies or substantial portions of the Software. THE SOFTWARE IS", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "= BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname) def test_default_without_data(self): yaml", "IN THE SOFTWARE. \"\"\" import unittest from programy.config.file.yaml_file import YamlConfigurationFile", "Software is furnished to do so, subject to the following", "\".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname) def test_default_without_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\"", "Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "DEALINGS IN THE SOFTWARE. 
\"\"\" import unittest from programy.config.file.yaml_file import", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "without limitation the rights to use, copy, modify, merge, publish,", "import YamlConfigurationFile from programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration", "sell copies of the Software, and to permit persons to", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "oobs: default: classname: programy.oob.defaults.default.DefaultOutOfBandProcessor \"\"\", ConsoleConfiguration(), \".\") brain_config = yaml.get_section(\"brain\")", "included in all copies or substantial portions of the Software.", "granted, free of charge, to any person obtaining a copy", "restriction, including without limitation the rights to use, copy, modify,", "whom the Software is furnished to do so, subject to", "\"\"\" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby", "Inc. Permission is hereby granted, free of charge, to any", "OTHER DEALINGS IN THE SOFTWARE. \"\"\" import unittest from programy.config.file.yaml_file", "to permit persons to whom the Software is furnished to", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def", "self.assertIsNotNone(brain_config) oobs_config = yaml.get_section(\"oobs\", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml,", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "in all copies or substantial portions of the Software. THE", "of charge, to any person obtaining a copy of this", "obtaining a copy of this software and associated documentation files", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "THE SOFTWARE. \"\"\" import unittest from programy.config.file.yaml_file import YamlConfigurationFile from", "import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml)", "yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs: default: \"\"\", ConsoleConfiguration(),", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml", "Permission is hereby granted, free of charge, to any person", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname) def test_default_without_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml)", "COTOBA DESIGN, Inc. 
Permission is hereby granted, free of charge,", "of this software and associated documentation files (the \"Software\"), to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "a copy of this software and associated documentation files (the", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "furnished to do so, subject to the following conditions: The", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "deal in the Software without restriction, including without limitation the", "The above copyright notice and this permission notice shall be", "or substantial portions of the Software. THE SOFTWARE IS PROVIDED", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "def test_default_without_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs: default:", "brain_config = yaml.get_section(\"brain\") self.assertIsNotNone(brain_config) oobs_config = yaml.get_section(\"oobs\", brain_config) self.assertIsNotNone(oobs_config) oob_config", "sublicense, and/or sell copies of the Software, and to permit", "YamlConfigurationFile from programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration class", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration(\"default\") oob_config.load_config_section(yaml, oobs_config, \".\") self.assertEqual(\"programy.oob.defaults.default.DefaultOutOfBandProcessor\", oob_config.classname)", "publish, distribute, sublicense, and/or sell copies of the Software, and", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "\"Software\"), to deal in the Software without restriction, including without", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "default: \"\"\", ConsoleConfiguration(), \".\") brain_config = yaml.get_section(\"brain\") self.assertIsNotNone(brain_config) oobs_config =", "associated documentation files (the \"Software\"), to deal in the Software", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "distribute, sublicense, and/or sell copies of the Software, and to", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs: default: classname: programy.oob.defaults.default.DefaultOutOfBandProcessor \"\"\", ConsoleConfiguration(),", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "test_oob_with_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs: default: classname:", "BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(\"\"\" brain: oobs:", "be included in all copies or substantial portions of the" ]
[ "that create specific orb types (whether board change, orb change,", "# Interface for active skills that create specific orb types", "specific orb types (whether board change, orb change, orb spawn,", "orb change, orb spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self,", "skills that create specific orb types (whether board change, orb", "board change, orb change, orb spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod", "types (whether board change, orb change, orb spawn, etc) class", "Interface for active skills that create specific orb types (whether", "orb spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute)", "import OrbAttribute # Interface for active skills that create specific", "OrbAttribute # Interface for active skills that create specific orb", "spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute) ->", "etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute) -> bool:", "class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self, orb_attribute: OrbAttribute) -> bool: pass", "...orb_attribute import OrbAttribute # Interface for active skills that create", "abc from ...orb_attribute import OrbAttribute # Interface for active skills", "active skills that create specific orb types (whether board change,", "from ...orb_attribute import OrbAttribute # Interface for active skills that", "create specific orb types (whether board change, orb change, orb", "change, orb change, orb spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def", "import abc from ...orb_attribute import OrbAttribute # Interface for active", "orb types (whether board change, orb change, orb spawn, etc)", "change, orb spawn, etc) class OrbGeneratorASI(abc.ABC): @abc.abstractmethod def does_orb_generator_create_orb_attribute(self, orb_attribute:", "for active skills that create specific orb types (whether board", "(whether board change, orb change, orb spawn, etc) class OrbGeneratorASI(abc.ABC):" ]
[ "print(\"The mystery package wasn't playing nice. Sorry!\") print('Hint: you can", "the README.md text and the long_description type. :rtype: typing.Tuple[str, str]", "str] \"\"\" with open('README.md', 'r') as readme: return (readme.read(), 'text/markdown')", "= package_name.replace('-', '_') # Special case for the 'backports' modules.", "time). # If it exists, overwrite 'mystery' in 'sys.modules'. Else,", "_fix_package_name(package_name: str) -> str: \"\"\" Fix the package name so", "url='https://github.com/DivoK/mystery', author='<NAME>', author_email='<EMAIL>', packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION,", "\"\"\" Choose the underlying mysterious package and handle the lockfile's", "sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__ = __file__ sys.modules['mystery'].__mystery_package_name__ = '{package_name}' del", "uses a temporary lockfile. dep_lock_path = _get_lockfile_path() if dep_lock_path.exists(): #", "the chosen package of course. return chosen_package def _fix_package_name(package_name: str)", ":return: fixed mystery package name. :rtype: str \"\"\" # Transform", "different package!' else: sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__ = __file__ sys.modules['mystery'].__mystery_package_name__", ":: 5 - Production/Stable', 'License :: OSI Approved :: MIT", "print('Internal error:', error) print(\"The mystery package wasn't playing nice. Sorry!\")", "of the README.md text and the long_description type. :rtype: typing.Tuple[str,", "Dynamically write the __init__.py for the package using the chosen", "of package names. :rtype: typing.List[str] \"\"\" try: # Get the", "the mystery package (it's \"{package_name}\" this time). # If it", "file. with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file: possible_packages_raw = backup_file.read() return", "so it could be placed in the __init__.py file. :param", "locked package and unlink the lockfile. chosen_package = dep_lock_path.read_text().strip() dep_lock_path.unlink()", "f''' # Here we're trying to import the mystery package", "ImportError as error: print('Internal error:', error) print(\"The mystery package wasn't", "OSI Approved :: MIT License', 'Operating System :: OS Independent',", "one of them. response = urllib.request.urlopen(CONFIG['top_pypi_packages_link']) possible_packages_raw = response.read() except", "the lockfile's state. :return: mystery package name. :rtype: str \"\"\"", "the 'backports' modules. if fixed_package_name.startswith('backports_'): fixed_package_name.replace('_', '.', 1) return fixed_package_name", "- Production/Stable', 'License :: OSI Approved :: MIT License', 'Operating", "(and even that's inconsequential). ''' ) def _get_long_description_data() -> typing.Tuple[str,", "OS Independent', 'Programming Language :: Python :: 3.6', 'Programming Language", "SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools fun python-packages random',", "the lockfile's path. :return: lockfile path. :rtype: pathlib.Path \"\"\" return", "SDistCommand(sdist): \"\"\" Will be registered as a replacement for pip's", "the offline backup file. 
with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file: possible_packages_raw", "MIT License', 'Operating System :: OS Independent', 'Programming Language ::", "5 - Production/Stable', 'License :: OSI Approved :: MIT License',", "/ '__init__.py' init_py_path.write_text( f''' # Here we're trying to import", "replacement for pip's 'sdist' command. \"\"\" def run(self): dep_lock_path =", "str :rtype: None \"\"\" package_name = _fix_package_name(package_name) init_py_path = pathlib.Path('mystery')", "{package_name} except ImportError as error: print('Internal error:', error) print(\"The mystery", "this only when mystery fails (and even that's inconsequential). '''", "be registered as a replacement for pip's 'sdist' command. \"\"\"", "in possible_packages] ) dep_lock_path.write_text(chosen_package) # Lock the chosen package of", ":rtype: typing.List[str] \"\"\" try: # Get the top PyPI packages", "description='It is a riddle, wrapped in a mystery, inside an", "dep_lock_path.exists(): # Use the locked package and unlink the lockfile.", "pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name']) class SDistCommand(sdist): \"\"\" Will be registered as a replacement", "with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file: possible_packages_raw = backup_file.read() return json.loads(possible_packages_raw)['rows'][:", "package!') sorry = 'try reinstalling mystery and get a different", "a different package!' else: sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__ = __file__", "open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file: possible_packages_raw = backup_file.read() return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']]", ":: 3.8', 'Intended Audience :: Other Audience', 'Topic :: Software", "# If it exists, overwrite 'mystery' in 'sys.modules'. Else, print", "FileNotFoundError: pass super().run() def _get_package_list() -> typing.List[str]: \"\"\" Get a", "pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def _get_lockfile_path() -> pathlib.Path: \"\"\" Assemble", "Approved :: MIT License', 'Operating System :: OS Independent', 'Programming", "possible_packages] ) dep_lock_path.write_text(chosen_package) # Lock the chosen package of course.", "package. :return: tuple of the README.md text and the long_description", "package and create the lockfile. possible_packages = _get_package_list() chosen_package =", "as readme: return (readme.read(), 'text/markdown') CHOSEN_PACKAGE = _choose_mystery_package() _write_init_py(CHOSEN_PACKAGE) LONG_DESCRIPTION,", "None \"\"\" package_name = _fix_package_name(package_name) init_py_path = pathlib.Path('mystery') init_py_path.mkdir(exist_ok=True) init_py_path", "the long_description type. :rtype: typing.Tuple[str, str] \"\"\" with open('README.md', 'r')", "'Operating System :: OS Independent', 'Programming Language :: Python ::", "as backup_file: possible_packages_raw = backup_file.read() return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']] def _choose_mystery_package()", "Core business logic for `mystery`. This code will run when", "chosen_package = dep_lock_path.read_text().strip() dep_lock_path.unlink() else: # Choose a package and", "run(self): dep_lock_path = _get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError: pass super().run()", "__init__.py for the package using the chosen package. 
:param chosen_package:", "Audience :: Other Audience', 'Topic :: Software Development :: Libraries", "Get data regarding the long description of the package. :return:", ":return: list of package names. :rtype: typing.List[str] \"\"\" try: #", "License', 'Operating System :: OS Independent', 'Programming Language :: Python", "inconsequential). ''' ) def _get_long_description_data() -> typing.Tuple[str, str]: \"\"\" Get", "in between setup.py runs, 'mystery' uses a temporary lockfile. dep_lock_path", "Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming", "name so it could be placed in the __init__.py file.", "setuptools from setuptools.command.sdist import sdist # Load the configuration file.", "unlink the lockfile. chosen_package = dep_lock_path.read_text().strip() dep_lock_path.unlink() else: # Choose", ":rtype: pathlib.Path \"\"\" return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name']) class SDistCommand(sdist): \"\"\" Will be", "import sdist # Load the configuration file. CONFIG_PATH = pathlib.Path('config.json')", "Fix the package name so it could be placed in", "'mystery' in 'sys.modules'. Else, print there was an error. import", "we're trying to import the mystery package (it's \"{package_name}\" this", "typing.Tuple[str, str]: \"\"\" Get data regarding the long description of", "str]: \"\"\" Get data regarding the long description of the", "the package name so it could be placed in the", "packages. :return: list of package names. :rtype: typing.List[str] \"\"\" try:", "# Use the offline backup file. with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as", "= urllib.request.urlopen(CONFIG['top_pypi_packages_link']) possible_packages_raw = response.read() except urllib.request.URLError: # Use the", "list of package names. :rtype: typing.List[str] \"\"\" try: # Get", "regarding the long description of the package. :return: tuple of", "being built and installed. \"\"\" import json import pathlib import", "names. :rtype: typing.List[str] \"\"\" try: # Get the top PyPI", "placed in the __init__.py file. :param package_name: mystery package name.", "urllib.request.urlopen(CONFIG['top_pypi_packages_link']) possible_packages_raw = response.read() except urllib.request.URLError: # Use the offline", ":type package_name: str :return: fixed mystery package name. :rtype: str", "Independent', 'Programming Language :: Python :: 3.6', 'Programming Language ::", "dep_lock_path.unlink() except FileNotFoundError: pass super().run() def _get_package_list() -> typing.List[str]: \"\"\"", "\"\"\" Assemble the lockfile's path. :return: lockfile path. :rtype: pathlib.Path", "-> str: \"\"\" Choose the underlying mysterious package and handle", "# Transform to eligible package name. fixed_package_name = package_name.replace('-', '_')", "package names. :rtype: typing.List[str] \"\"\" try: # Get the top", "chosen package. :param chosen_package: mystery package name. :type chosen_package: str", "nice. Sorry!\") print('Hint: you can always try to reinstall mystery", "handle the lockfile's state. :return: mystery package name. :rtype: str", "None: \"\"\" Dynamically write the __init__.py for the package using", "typing import setuptools from setuptools.command.sdist import sdist # Load the", "\"\"\" Get a list of possible packages. :return: list of", "str) -> str: \"\"\" Fix the package name so it", "the configuration file. 
CONFIG_PATH = pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def", "Language :: Python :: 3.7', 'Programming Language :: Python ::", "in 'sys.modules'. Else, print there was an error. import sys", "a different package!') sorry = 'try reinstalling mystery and get", "-> typing.List[str]: \"\"\" Get a list of possible packages. :return:", "temporary lockfile. dep_lock_path = _get_lockfile_path() if dep_lock_path.exists(): # Use the", "package of course. return chosen_package def _fix_package_name(package_name: str) -> str:", "could be placed in the __init__.py file. :param package_name: mystery", "list of possible packages. :return: list of package names. :rtype:", "'r') as readme: return (readme.read(), 'text/markdown') CHOSEN_PACKAGE = _choose_mystery_package() _write_init_py(CHOSEN_PACKAGE)", "import random import tempfile import urllib.request import typing import setuptools", "this time). # If it exists, overwrite 'mystery' in 'sys.modules'.", "the long description of the package. :return: tuple of the", "'{package_name}' del sys # We care about this only when", "mystery package (it's \"{package_name}\" this time). # If it exists,", "This code will run when the package is being built", ":: MIT License', 'Operating System :: OS Independent', 'Programming Language", "Else, print there was an error. import sys try: import", "only when mystery fails (and even that's inconsequential). ''' )", "author_email='<EMAIL>', packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery", "even that's inconsequential). ''' ) def _get_long_description_data() -> typing.Tuple[str, str]:", "\"\"\" # Transform to eligible package name. fixed_package_name = package_name.replace('-',", "type. :rtype: typing.Tuple[str, str] \"\"\" with open('README.md', 'r') as readme:", "pathlib import random import tempfile import urllib.request import typing import", "\"\"\" with open('README.md', 'r') as readme: return (readme.read(), 'text/markdown') CHOSEN_PACKAGE", "import sys try: import {package_name} except ImportError as error: print('Internal", "mystery package wasn't playing nice. Sorry!\") print('Hint: you can always", "dep_lock_path.read_text().strip() dep_lock_path.unlink() else: # Choose a package and create the", "If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there", "\"\"\" Dynamically write the __init__.py for the package using the", "\"\"\" try: # Get the top PyPI packages and use", "[package['project'] for package in possible_packages] ) dep_lock_path.write_text(chosen_package) # Lock the", "response = urllib.request.urlopen(CONFIG['top_pypi_packages_link']) possible_packages_raw = response.read() except urllib.request.URLError: # Use", "package is being built and installed. \"\"\" import json import", "package_name.replace('-', '_') # Special case for the 'backports' modules. if", "'Topic :: Software Development :: Libraries :: Python Modules', ],", "'try reinstalling mystery and get a different package!' else: sys.modules['mystery']", "fixed_package_name.startswith('backports_'): fixed_package_name.replace('_', '.', 1) return fixed_package_name def _write_init_py(package_name: str) ->", "exists, overwrite 'mystery' in 'sys.modules'. 
Else, print there was an", "the chosen dependency consistent in between setup.py runs, 'mystery' uses", "trying to import the mystery package (it's \"{package_name}\" this time).", "package and handle the lockfile's state. :return: mystery package name.", "file. :param package_name: mystery package name. :type package_name: str :return:", "error) print(\"The mystery package wasn't playing nice. Sorry!\") print('Hint: you", "fun python-packages random', classifiers=[ 'Development Status :: 5 - Production/Stable',", "def _fix_package_name(package_name: str) -> str: \"\"\" Fix the package name", "lockfile's path. :return: lockfile path. :rtype: pathlib.Path \"\"\" return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name'])", "print('Hint: you can always try to reinstall mystery and get", "eligible package name. fixed_package_name = package_name.replace('-', '_') # Special case", "try to reinstall mystery and get a different package!') sorry", "offline backup file. with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file: possible_packages_raw =", ":rtype: None \"\"\" package_name = _fix_package_name(package_name) init_py_path = pathlib.Path('mystery') init_py_path.mkdir(exist_ok=True)", "try: import {package_name} except ImportError as error: print('Internal error:', error)", "configuration file. CONFIG_PATH = pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def _get_lockfile_path()", "registered as a replacement for pip's 'sdist' command. \"\"\" def", "else: sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__ = __file__ sys.modules['mystery'].__mystery_package_name__ = '{package_name}'", "the top PyPI packages and use one of them. response", "name='mystery', version='1.0.2', description='It is a riddle, wrapped in a mystery,", "_get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError: pass super().run() def _get_package_list() ->", "\"\"\" package_name = _fix_package_name(package_name) init_py_path = pathlib.Path('mystery') init_py_path.mkdir(exist_ok=True) init_py_path =", "wasn't playing nice. Sorry!\") print('Hint: you can always try to", "Use the locked package and unlink the lockfile. chosen_package =", "and get a different package!') sorry = 'try reinstalling mystery", "return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']] def _choose_mystery_package() -> str: \"\"\" Choose the", "Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Intended", "= _choose_mystery_package() _write_init_py(CHOSEN_PACKAGE) LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data() setuptools.setup( name='mystery', version='1.0.2',", "__init__.py file. :param package_name: mystery package name. :type package_name: str", "= _get_long_description_data() setuptools.setup( name='mystery', version='1.0.2', description='It is a riddle, wrapped", "of possible packages. :return: list of package names. :rtype: typing.List[str]", "Transform to eligible package name. fixed_package_name = package_name.replace('-', '_') #", "fixed_package_name = package_name.replace('-', '_') # Special case for the 'backports'", "between setup.py runs, 'mystery' uses a temporary lockfile. dep_lock_path =", "mystery package name. :type package_name: str :return: fixed mystery package", "_write_init_py(package_name: str) -> None: \"\"\" Dynamically write the __init__.py for", "Load the configuration file. 
CONFIG_PATH = pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r'))", "dep_lock_path = _get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError: pass super().run() def", "is a riddle, wrapped in a mystery, inside an enigma.',", "long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools fun python-packages random', classifiers=[ 'Development Status ::", "long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools fun python-packages random', classifiers=[ 'Development Status", "package. :param chosen_package: mystery package name. :type chosen_package: str :rtype:", "= _get_package_list() chosen_package = random.choice( [package['project'] for package in possible_packages]", "chosen_package = random.choice( [package['project'] for package in possible_packages] ) dep_lock_path.write_text(chosen_package)", ":rtype: str \"\"\" # Transform to eligible package name. fixed_package_name", "= _get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError: pass super().run() def _get_package_list()", "to eligible package name. fixed_package_name = package_name.replace('-', '_') # Special", "name. :rtype: str \"\"\" # Transform to eligible package name.", "for the package using the chosen package. :param chosen_package: mystery", "installed. \"\"\" import json import pathlib import random import tempfile", "# We care about this only when mystery fails (and", "command. \"\"\" def run(self): dep_lock_path = _get_lockfile_path() try: dep_lock_path.unlink() except", "_get_package_list() chosen_package = random.choice( [package['project'] for package in possible_packages] )", "str: \"\"\" Choose the underlying mysterious package and handle the", "for the 'backports' modules. if fixed_package_name.startswith('backports_'): fixed_package_name.replace('_', '.', 1) return", "CONFIG_PATH = pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def _get_lockfile_path() -> pathlib.Path:", "def run(self): dep_lock_path = _get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError: pass", "for `mystery`. This code will run when the package is", "if fixed_package_name.startswith('backports_'): fixed_package_name.replace('_', '.', 1) return fixed_package_name def _write_init_py(package_name: str)", "the chosen package. :param chosen_package: mystery package name. :type chosen_package:", "-> typing.Tuple[str, str]: \"\"\" Get data regarding the long description", ":: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language", "built and installed. \"\"\" import json import pathlib import random", "# Here we're trying to import the mystery package (it's", "as a replacement for pip's 'sdist' command. \"\"\" def run(self):", "_get_lockfile_path() -> pathlib.Path: \"\"\" Assemble the lockfile's path. :return: lockfile", "there was an error. import sys try: import {package_name} except", "= json.load(CONFIG_PATH.open('r')) def _get_lockfile_path() -> pathlib.Path: \"\"\" Assemble the lockfile's", "that's inconsequential). ''' ) def _get_long_description_data() -> typing.Tuple[str, str]: \"\"\"", "overwrite 'mystery' in 'sys.modules'. Else, print there was an error.", "Status :: 5 - Production/Stable', 'License :: OSI Approved ::", "get a different package!' 
else: sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__ =", "dependency consistent in between setup.py runs, 'mystery' uses a temporary", "urllib.request import typing import setuptools from setuptools.command.sdist import sdist #", ":: Software Development :: Libraries :: Python Modules', ], )", "mystery and get a different package!' else: sys.modules['mystery'] = {package_name}", "file. CONFIG_PATH = pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def _get_lockfile_path() ->", "setup.py runs, 'mystery' uses a temporary lockfile. dep_lock_path = _get_lockfile_path()", "use one of them. response = urllib.request.urlopen(CONFIG['top_pypi_packages_link']) possible_packages_raw = response.read()", "pathlib.Path \"\"\" return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name']) class SDistCommand(sdist): \"\"\" Will be registered", "course. return chosen_package def _fix_package_name(package_name: str) -> str: \"\"\" Fix", "keep the chosen dependency consistent in between setup.py runs, 'mystery'", "error: print('Internal error:', error) print(\"The mystery package wasn't playing nice.", "data regarding the long description of the package. :return: tuple", "it could be placed in the __init__.py file. :param package_name:", "3.6', 'Programming Language :: Python :: 3.7', 'Programming Language ::", "System :: OS Independent', 'Programming Language :: Python :: 3.6',", "name. :type package_name: str :return: fixed mystery package name. :rtype:", "fixed_package_name.replace('_', '.', 1) return fixed_package_name def _write_init_py(package_name: str) -> None:", "= response.read() except urllib.request.URLError: # Use the offline backup file.", "We care about this only when mystery fails (and even", "''' ) def _get_long_description_data() -> typing.Tuple[str, str]: \"\"\" Get data", "the package is being built and installed. \"\"\" import json", "= {package_name} sys.modules['mystery'].__mystery_init_py__ = __file__ sys.modules['mystery'].__mystery_package_name__ = '{package_name}' del sys", "a list of possible packages. :return: list of package names.", "= _fix_package_name(package_name) init_py_path = pathlib.Path('mystery') init_py_path.mkdir(exist_ok=True) init_py_path = init_py_path /", "path. :rtype: pathlib.Path \"\"\" return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name']) class SDistCommand(sdist): \"\"\" Will", "cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools fun python-packages", "except FileNotFoundError: pass super().run() def _get_package_list() -> typing.List[str]: \"\"\" Get", "_choose_mystery_package() -> str: \"\"\" Choose the underlying mysterious package and", "always try to reinstall mystery and get a different package!')", "def _choose_mystery_package() -> str: \"\"\" Choose the underlying mysterious package", "CHOSEN_PACKAGE = _choose_mystery_package() _write_init_py(CHOSEN_PACKAGE) LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data() setuptools.setup( name='mystery',", "\"\"\" def run(self): dep_lock_path = _get_lockfile_path() try: dep_lock_path.unlink() except FileNotFoundError:", "print there was an error. 
import sys try: import {package_name}", "mystery, inside an enigma.', url='https://github.com/DivoK/mystery', author='<NAME>', author_email='<EMAIL>', packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist':", "try: # Get the top PyPI packages and use one", "typing.Tuple[str, str] \"\"\" with open('README.md', 'r') as readme: return (readme.read(),", "urllib.request.URLError: # Use the offline backup file. with open(CONFIG['top_pypi_packages_offline_backup'], 'r')", "import {package_name} except ImportError as error: print('Internal error:', error) print(\"The", "write the __init__.py for the package using the chosen package.", "package name. :rtype: str \"\"\" # To keep the chosen", "-> None: \"\"\" Dynamically write the __init__.py for the package", "random', classifiers=[ 'Development Status :: 5 - Production/Stable', 'License ::", "= 'try reinstalling mystery and get a different package!' else:", "different package!') sorry = 'try reinstalling mystery and get a", "Sorry!\") print('Hint: you can always try to reinstall mystery and", "install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools fun", "import typing import setuptools from setuptools.command.sdist import sdist # Load", "LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data() setuptools.setup( name='mystery', version='1.0.2', description='It is a riddle,", "runs, 'mystery' uses a temporary lockfile. dep_lock_path = _get_lockfile_path() if", "Choose a package and create the lockfile. possible_packages = _get_package_list()", "and handle the lockfile's state. :return: mystery package name. :rtype:", ":return: mystery package name. :rtype: str \"\"\" # To keep", "an enigma.', url='https://github.com/DivoK/mystery', author='<NAME>', author_email='<EMAIL>', packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6',", "dep_lock_path = _get_lockfile_path() if dep_lock_path.exists(): # Use the locked package", "init_py_path / '__init__.py' init_py_path.write_text( f''' # Here we're trying to", "a riddle, wrapped in a mystery, inside an enigma.', url='https://github.com/DivoK/mystery',", "reinstalling mystery and get a different package!' else: sys.modules['mystery'] =", "of the package. :return: tuple of the README.md text and", "be placed in the __init__.py file. :param package_name: mystery package", ":: Python :: 3.8', 'Intended Audience :: Other Audience', 'Topic", "-> pathlib.Path: \"\"\" Assemble the lockfile's path. :return: lockfile path.", "-> str: \"\"\" Fix the package name so it could", "_fix_package_name(package_name) init_py_path = pathlib.Path('mystery') init_py_path.mkdir(exist_ok=True) init_py_path = init_py_path / '__init__.py'", "json import pathlib import random import tempfile import urllib.request import", "sys.modules['mystery'].__mystery_package_name__ = '{package_name}' del sys # We care about this", "Audience', 'Topic :: Software Development :: Libraries :: Python Modules',", "CONFIG['top_x_packages']] def _choose_mystery_package() -> str: \"\"\" Choose the underlying mysterious", "Language :: Python :: 3.8', 'Intended Audience :: Other Audience',", "text and the long_description type. 
:rtype: typing.Tuple[str, str] \"\"\" with", ":: Python :: 3.6', 'Programming Language :: Python :: 3.7',", ":rtype: typing.Tuple[str, str] \"\"\" with open('README.md', 'r') as readme: return", "author='<NAME>', author_email='<EMAIL>', packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI", "keywords='mystery setuptools fun python-packages random', classifiers=[ 'Development Status :: 5", "packages=setuptools.find_packages(), install_requires=[CHOSEN_PACKAGE], cmdclass={'sdist': SDistCommand}, python_requires='>=3.6', include_package_data=True, long_description=LONG_DESCRIPTION, long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE, keywords='mystery setuptools", "README.md text and the long_description type. :rtype: typing.Tuple[str, str] \"\"\"", "package (it's \"{package_name}\" this time). # If it exists, overwrite", "'Intended Audience :: Other Audience', 'Topic :: Software Development ::", "return chosen_package def _fix_package_name(package_name: str) -> str: \"\"\" Fix the", "'__init__.py' init_py_path.write_text( f''' # Here we're trying to import the", "and get a different package!' else: sys.modules['mystery'] = {package_name} sys.modules['mystery'].__mystery_init_py__", "setuptools.setup( name='mystery', version='1.0.2', description='It is a riddle, wrapped in a", "in a mystery, inside an enigma.', url='https://github.com/DivoK/mystery', author='<NAME>', author_email='<EMAIL>', packages=setuptools.find_packages(),", "from setuptools.command.sdist import sdist # Load the configuration file. CONFIG_PATH", "can always try to reinstall mystery and get a different", "the __init__.py file. :param package_name: mystery package name. :type package_name:", "backup_file.read() return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']] def _choose_mystery_package() -> str: \"\"\" Choose", "def _get_package_list() -> typing.List[str]: \"\"\" Get a list of possible", "init_py_path.mkdir(exist_ok=True) init_py_path = init_py_path / '__init__.py' init_py_path.write_text( f''' # Here", "(readme.read(), 'text/markdown') CHOSEN_PACKAGE = _choose_mystery_package() _write_init_py(CHOSEN_PACKAGE) LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data()", "business logic for `mystery`. This code will run when the", "{package_name} sys.modules['mystery'].__mystery_init_py__ = __file__ sys.modules['mystery'].__mystery_package_name__ = '{package_name}' del sys #", "tuple of the README.md text and the long_description type. :rtype:", "= pathlib.Path('config.json') CONFIG = json.load(CONFIG_PATH.open('r')) def _get_lockfile_path() -> pathlib.Path: \"\"\"", "setuptools.command.sdist import sdist # Load the configuration file. CONFIG_PATH =", "when mystery fails (and even that's inconsequential). ''' ) def", "open('README.md', 'r') as readme: return (readme.read(), 'text/markdown') CHOSEN_PACKAGE = _choose_mystery_package()", "pip's 'sdist' command. \"\"\" def run(self): dep_lock_path = _get_lockfile_path() try:", "package name. 
<filename>setup.py
"""
Core business logic for `mystery`.
This code will run when the package is being built and installed.
"""
import json
import pathlib
import random
import tempfile
import urllib.request
import typing

import setuptools
from setuptools.command.sdist import sdist

# Load the configuration file.
CONFIG_PATH = pathlib.Path('config.json')
CONFIG = json.load(CONFIG_PATH.open('r'))


def _get_lockfile_path() -> pathlib.Path:
    """
    Assemble the lockfile's path.

    :return: lockfile path.
    :rtype: pathlib.Path
    """
    return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name'])


class SDistCommand(sdist):
    """
    Will be registered as a replacement for pip's 'sdist' command.
    """

    def run(self):
        dep_lock_path = _get_lockfile_path()
        try:
            dep_lock_path.unlink()
        except FileNotFoundError:
            pass
        super().run()


def _get_package_list() -> typing.List[str]:
    """
    Get a list of possible packages.

    :return: list of package names.
    :rtype: typing.List[str]
    """
    try:
        # Get the top PyPI packages and use one of them.
        response = urllib.request.urlopen(CONFIG['top_pypi_packages_link'])
        possible_packages_raw = response.read()
    except urllib.request.URLError:
        # Use the offline backup file.
        with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file:
            possible_packages_raw = backup_file.read()
    return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']]


def _choose_mystery_package() -> str:
    """
    Choose the underlying mysterious package and handle the lockfile's state.

    :return: mystery package name.
    :rtype: str
    """
    # To keep the chosen dependency consistent in between setup.py runs,
    # 'mystery' uses a temporary lockfile.
    dep_lock_path = _get_lockfile_path()
    if dep_lock_path.exists():
        # Use the locked package and unlink the lockfile.
        chosen_package = dep_lock_path.read_text().strip()
        dep_lock_path.unlink()
    else:
        # Choose a package and create the lockfile.
        possible_packages = _get_package_list()
        chosen_package = random.choice(
            [package['project'] for package in possible_packages]
        )
        dep_lock_path.write_text(chosen_package)  # Lock the chosen package of course.
    return chosen_package


def _fix_package_name(package_name: str) -> str:
    """
    Fix the package name so it could be placed in the __init__.py file.

    :param package_name: mystery package name.
    :type package_name: str
    :return: fixed mystery package name.
    :rtype: str
    """
    # Transform to eligible package name.
    fixed_package_name = package_name.replace('-', '_')
    # Special case for the 'backports' modules.
    if fixed_package_name.startswith('backports_'):
        # str.replace() returns a new string, so the result must be assigned back.
        fixed_package_name = fixed_package_name.replace('_', '.', 1)
    return fixed_package_name


def _write_init_py(package_name: str) -> None:
    """
    Dynamically write the __init__.py for the package using the chosen package.

    :param chosen_package: mystery package name.
    :type chosen_package: str
    :rtype: None
    """
    package_name = _fix_package_name(package_name)
    init_py_path = pathlib.Path('mystery')
    init_py_path.mkdir(exist_ok=True)
    init_py_path = init_py_path / '__init__.py'
    init_py_path.write_text(
        f'''
# Here we're trying to import the mystery package (it's "{package_name}" this time).
# If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there was an error.
import sys
try:
    import {package_name}
except ImportError as error:
    print('Internal error:', error)
    print("The mystery package wasn't playing nice. Sorry!")
    print('Hint: you can always try to reinstall mystery and get a different package!')
    sorry = 'try reinstalling mystery and get a different package!'
else:
    sys.modules['mystery'] = {package_name}
    sys.modules['mystery'].__mystery_init_py__ = __file__
    sys.modules['mystery'].__mystery_package_name__ = '{package_name}'
del sys  # We care about this only when mystery fails (and even that's inconsequential).
'''
    )


def _get_long_description_data() -> typing.Tuple[str, str]:
    """
    Get data regarding the long description of the package.

    :return: tuple of the README.md text and the long_description type.
    :rtype: typing.Tuple[str, str]
    """
    with open('README.md', 'r') as readme:
        return (readme.read(), 'text/markdown')


CHOSEN_PACKAGE = _choose_mystery_package()
_write_init_py(CHOSEN_PACKAGE)
LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data()

setuptools.setup(
    name='mystery',
    version='1.0.2',
    description='It is a riddle, wrapped in a mystery, inside an enigma.',
    url='https://github.com/DivoK/mystery',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=setuptools.find_packages(),
    install_requires=[CHOSEN_PACKAGE],
    cmdclass={'sdist': SDistCommand},
    python_requires='>=3.6',
    include_package_data=True,
    long_description=LONG_DESCRIPTION,
    long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
    keywords='mystery setuptools fun python-packages random',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Intended Audience :: Other Audience',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
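# --- Illustrative sketch (not part of the original setup.py) ---
# Once 'mystery' has been installed, the generated mystery/__init__.py aliases the
# randomly chosen dependency, so user code can inspect which package it got. The
# package name shown below is purely an example; the real one is picked at build
# time by _choose_mystery_package().
#
#   import mystery
#   print(mystery.__mystery_package_name__)  # e.g. 'requests'
#   print(mystery.__mystery_init_py__)       # path of the generated __init__.py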
<reponame>CrazyIvanPro/Optimal_Transport<filename>ADMM_primal.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# File Name: ADMM_primal.py
# Purpose  : implementation for ADMM method
#            for solving primal problem
# =======================================

from utils import get_params
import numpy as np
import sys


def ADMM_primal(mu, nu, c, iters=10000, rho=1024, alpha=1.618):
    """ADMM_primal"""
    # initialize
    m, n = c.shape
    pi = np.zeros((m, n))
    pi_dag = np.zeros((m, n))
    w = np.zeros((m, n))
    u = np.zeros(m)
    v = np.zeros(n)

    rho_tilde = rho * 32
    while rho_tilde >= rho:
        for _ in range(iters):
            r = ((-w + u.reshape((m, 1)) + v.reshape((1, n)) - c) / rho
                 + mu.reshape((m, 1)) + nu.reshape((1, n)) + pi_dag)
            pi = (r
                  - ((r.sum(axis=1) - r.sum() / (m + n + 1)) / (n + 1)).reshape((m, 1))
                  - ((r.sum(axis=0) - r.sum() / (m + n + 1)) / (m + 1)).reshape((1, n)))
            pi_dag = np.maximum(pi + w / rho, 0.0)
            u = u + alpha * rho * (mu - pi.sum(axis=1))
            v = v + alpha * rho * (nu - pi.sum(axis=0))
            w = w + alpha * rho * (pi - pi_dag)
        rho_tilde = rho_tilde / 2

    print('error_mu = %.5e' % np.linalg.norm(pi_dag.sum(axis=1) - mu, 1))
    print('error_nu = %.5e' % np.linalg.norm(pi_dag.sum(axis=0) - nu, 1))
    print('fvall = %.5e' % (c * pi_dag).sum())


if __name__ == '__main__':
    try:
        print("Test...")
        _mu, _nu, _c = get_params(64, 'random')
        ADMM_primal(_mu, _nu, _c)
    except KeyboardInterrupt:
        print(" Ctrl+C pressed...")
        sys.exit(1)
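# --- Illustrative usage sketch (not part of the original file) ---
# `get_params` comes from this repository's `utils` module and is not reproduced
# here. Assuming `mu` and `nu` are probability vectors of shapes (m,) and (n,) and
# `c` is an (m, n) cost matrix, the solver can also be driven with explicitly built
# inputs; the sizes and iteration count below are arbitrary example values.
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   m = n = 16
#   mu = rng.random(m); mu /= mu.sum()
#   nu = rng.random(n); nu /= nu.sum()
#   c = rng.random((m, n))
#   ADMM_primal(mu, nu, c, iters=1000)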
<reponame>pombase/legacy-eg-loader
#!/usr/bin/python

import os
import sys
import pprint
import argparse

parser = argparse.ArgumentParser(description='Clean up the data for a given parameter')
parser.add_argument('--infile', help="Path to the VCF file", default='test.vcf')
parser.add_argument('--outfile', help="Path to the new VCF file", default='test.out.vcf')
parser.add_argument('--param', help="Parameter to clean", default='PL')
args = parser.parse_args()

fi = open(args.infile, 'r')
#fo = open('Spombe.2013-01-02.filt3c.nr57-final.snps.anno-snpeff3.cleaned3.AB325691.vcf', 'w')
fo = open(args.outfile, 'w')
for line in fi:
    if len(line) == 0:
        continue
    if line[0] == '#':
        fo.write(line)
        continue
    line = line.rstrip()
    v = line.split('\t')
    params = v[8].split(':')
    out = v[0:8]
    try:
        paramIndex = params.index(args.param)
        del params[paramIndex]
        out.append(':'.join(params))
        for d in v[9:]:
            dv = d.split(':')
            del dv[paramIndex]
            out.append(':'.join(dv))
    except ValueError:
        out.append(':'.join(params))
        out += v[9:]
    fo.write("\t".join(out) + "\n")
fi.close()
fo.close()
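# --- Illustrative usage (not part of the original script) ---
# Strips the chosen FORMAT field (PL by default) from the FORMAT column and from
# every per-sample column of a VCF. The script filename below is a placeholder;
# the original filename is not recorded in this corpus entry.
#
#   python clean_vcf_param.py --infile test.vcf --outfile test.out.vcf --param PL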
#!/usr/bin/env python
"""
Helpers for building coherency datasets (tasks: up = utterance permutation,
us = utterance sampling, hup = half utterance perturbation, ui = utterance
insertion) from the DailyDialog and Switchboard corpora. Used by the
DailyDialogConverter and SwitchboardConverter classes.
"""

import os
import re
import csv
import random
import argparse
import sys
from copy import deepcopy
from math import factorial
from collections import Counter, defaultdict

import numpy as np
import pandas as pd
from nltk import word_tokenize
from tqdm import tqdm, trange
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader


def getKeysByValue(dictOfElements, valueToFind):
    listOfKeys = list()
    for item in dictOfElements.items():
        if item[1] == valueToFind:
            listOfKeys.append(item[0])
    return listOfKeys


def permute(sents, sent_DAs, amount):
    """ return a list of different! permuted sentences and their respective dialog acts """
    """ if amount is greater than the possible amount of permutations, only the possible ones are returned """
    assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
    if amount == 0:
        return []

    permutations = [list(range(len(sents)))]
    amount = min(amount, factorial(len(sents)) - 1)
    for i in range(amount):
        permutation = np.random.permutation(len(sents))
        # actually: do ... while permutation not in permutations
        while permutation.tolist() in permutations:
            permutation = np.random.permutation(len(sents))
        permutations.append(permutation.tolist())
    return permutations[1:]  # the first one is the original, which was included s.t. it won't be generated


def draw_rand_sent(act_utt_df, sent_len, amount):
    """ df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance),
        with act being a number from 1 to 4 and utt being a sentence """
    permutations = []
    for _ in range(amount):
        (utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)
        sent_insert_ix = random.randint(0, sent_len - 1)
        permutations.append((utt, da, name, ix, sent_insert_ix))
    return permutations


def draw_rand_sent_from_df(df):
    # Draw a random (utterance, act, dialogue name, index) row from the act/utt dataframe.
    ix = random.randint(0, len(df['utt']) - 1)
    return df['utt'][ix], df['act'][ix], df['dialogue'][ix], df['ix'][ix]


def utterance_insertions(length, amount):
    possible_permutations = []
    original = list(range(length))
    for ix in original:
        for y in range(length):
            if ix == y:
                continue
            ix_removed = original[0:ix] + ([] if ix == length - 1 else original[ix + 1:])
            ix_removed.insert(y, ix)
            possible_permutations.append(deepcopy(ix_removed))

    permutations = []
    for _ in range(amount):
        i = random.randint(0, len(possible_permutations) - 1)
        permutations.append(possible_permutations[i])
    return permutations
the splits", "= os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task)) df = open(dial_file,", "= self.tokenizer(sentence) for w in sentence: cnt[w] += 1 itos_file", "in pa]) pu = [tok_seqs[i] for i in p] p_u", "True: permutation = [] segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for segm_ix", "permutations.append(permutation) break return permutations[1:], segment_permutations def swda_utterance_insertion(self, speaker_ixs, amounts): segment_ixs", "os.path.isfile(dial_file) and os.path.isfile(act_file), \"could not find input files\" assert os.path.isfile(self.act_utt_file),", "for i in p] p_u = str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded)", "permutations class DailyDialogConverter: def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset", "word2id = lambda x: x tokenizer = word_tokenize if args.corpus", "permutations: permutation = np.random.permutation(len(sents)) permutations.append(permutation.tolist()) return permutations[1:] #the first one", "insert_da, name, ix, insert_ix = p insert_da = self.da2num[insert_da] p_a", "elif self.task == 'ui': permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts) swda_fname", "\"{}_{}\".format(self.setname, line_count) row = (act, utt, dialog_name,utt_i) csv_writer.writerow(row) def convert_dset(self,", "elif args.corpus == 'Switchboard': converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task,", "line_count) row = (act, utt, dialog_name,utt_i) csv_writer.writerow(row) def convert_dset(self, amounts):", "\".join([str(a) for a in acts]) u = str(tok_seqs) pa =", "ix_removed = original[0:ix] + ([] if ix == length-1 else", "else swda_name ix = utt.utterance_index self.utt_da_pairs.append((sentence, act, swda_name, ix)) def", "word_tokenize from tqdm import tqdm, trange import argparse import numpy", "tokenizer, word2id, task=args.task) converter.create_act_utt() elif args.corpus == 'Switchboard': converter =", "insert_ix) = p a = \" \".join([str(a) for a in", "train_ixs, val_ixs, test_ixs self.utt_da_pairs = [] prev_da = \"%\" for", "= os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task)) val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task)) test_output_file", "permutations, only the uniquely possible ones are returned \"\"\" assert", "'r') af = open(act_file, 'r') of = open(self.output_file, 'w') discarded", "range(length): if ix == y: continue ix_removed = original[0:ix] +", "insert_ix = p insert_da = self.da2num[insert_da] p_a = deepcopy(acts) p_a[insert_ix]", "= prev_act acts.append(self.da2num[act]) prev_act = act if \"A\" in utt.caller:", "\"t1\": 42, \"bd\": 43, \"aap\": 44, \"am\": 45, \"^g\": 46,", "act2word = {1:\"inform\",2:\"question\", 3:\"directive\", 4:\"commissive\"} def permute(sents, sent_DAs, amount): \"\"\"", "= i return segment_indices def swda_half_perturb(self, amount, speaker_ixs): segm_ixs =", "same s.t. the splits will be the same! 
train_ixs, val_ixs", "self.utt_num = 0 for utt in self.corpus.iter_utterances(): self.utt_num += 1", "\"\"\" \"\"\" if amount is greater than the possible amount", "amount = min(amount, factorial(len(sents))-1) for i in range(amount): permutation =", "'DailyDialog' or 'Switchboard' \"\"\") parser.add_argument('--seed', type=int, default=42, help=\"random seed for", "to ids\") parser.add_argument('--task', required=True, type=str, default=\"up\", help=\"\"\"for which task the", "while permutation.tolist() in permutations: permutation = np.random.permutation(len(sents)) permutations.append(permutation.tolist()) return permutations[1:]", "else: new_segments[1::2] = permuted_speaker_ix new_segments[::2] = speaker_orig segment_permutations.append(new_segments) permutation =", "but the names are too close if len(speaker_to_perm) < 2:", "permuted sentences and their respective dialog acts \"\"\" \"\"\" if", "segment_permutations #the first one is the original, which was included", "pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix']) rand_generator = lambda: draw_rand_sent_from_df(act_utt_df) df = open(dial_file,", "make sure that for each task the seed is the", "= dict() prev_speaker = speaker_ixs[0] for j,speaker in enumerate(speaker_ixs): if", "deepcopy(acts) p_a[insert_ix] = insert_da pa = \" \".join([str(a) for a", "return listOfKeys def switchboard_da_mapping(): mapping_dict = dict({ \"sd\": 1, \"b\":", "def create_vocab(self): print(\"Creating Vocab file for Switchboard\") cnt = Counter()", "if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.val_ixs:", "\"+\": act = prev_act acts.append(self.da2num[act]) prev_act = act if \"A\"", "= dial.split('__eou__') seqs = seqs[:-1] if len(seqs) < 5: discarded", "None self.task = task self.ranking_dataset = ranking_dataset self.perturbation_statistics = 0", "== 0: return [] permutations = [list(range(len(sents)))] segment_permutations = []", "for ix in original: for y in range(length): if ix", "33, \"fp\": 34, \"qrr\": 35, \"arp\": 36, \"nd\": 37, \"t3\":", "segm_perm = deepcopy(segments) rem_elem = segments[i_from] segm_perm = segm_perm[0:i_from] +", "len(segments)-1) i_to = random.randint(0, len(segments)-2) segm_perm = deepcopy(segments) rem_elem =", "acts[:-1] acts = [int(act) for act in acts] if self.task", "os.path.isfile(act_file), \"could not find input files\" assert os.path.isfile(self.act_utt_file), \"missing act_utt.txt", "data dir name\" def create_act_utt(self): dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file", "in permutations or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents) break return permutations[1:]", "(utterance permutation) us (utterance sampling) hup (half utterance petrurbation) ui", "str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded) class SwitchboardConverter: def __init__(self, data_dir, tokenizer,", "p_u[insert_ix] = insert_sent if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if", "shuffled_path = os.path.join(self.data_dir, \"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) for i,trans", "and os.path.isfile(act_file), \"could not find input files\" assert 
os.path.isfile(self.act_utt_file), \"missing", "that for each task the seed is the same s.t.", "16, \"o\": 17, \"bh\": 18, \"^q\": 19, \"bf\": 20, \"na\":", "tqdm, trange import argparse import numpy as np import re", "re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence)) act = utt.damsl_act_tag() if", "from math import factorial import random from collections import Counter,", "tok_seqs] acts = act.split(' ') acts = acts[:-1] acts =", "\"co\": 40, \"cc\": 41, \"t1\": 42, \"bd\": 43, \"aap\": 44,", "os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir = os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir, \"shuffled_{}\".format(self.task)) if", "[list(range(len(sents)))] amount = min(amount, factorial(len(sents))-1) for i in range(amount): permutation", "itosf.write(\"{}\\n\".format(word)) #getKeysByValue def swda_permute(self, sents, amount, speaker_ixs): if amount ==", "def swda_permute(self, sents, amount, speaker_ixs): if amount == 0: return", "list(set(segment_ixs.values())) segment_permutations = [] permutations = [] i = 0", "\"sd\": 1, \"b\": 2, \"sv\": 3, \"aa\": 4, \"%-\": 5,", "a = \" \".join([str(a) for a in acts]) u =", "== 'us': (utt, da, name, ix, insert_ix) = perm row", "= os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir, \"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path)", "speaker != prev_speaker: prev_speaker = speaker i += 1 segment_indices[j]", "== \"+\": act = prev_da _, swda_name = os.path.split(utt.swda_filename) swda_name", "os.path.join(root_data_dir, \"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert os.path.isfile(dial_file) and os.path.isfile(act_file),", "acts[:-1] acts = [int(act) for act in acts] for utt_i,", "1 itos_file = os.path.join(self.data_dir, \"itos.txt\") itosf = open(itos_file, \"w\") for", "utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)): dialog_name = \"{}_{}\".format(self.setname, line_count)", "= SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements,", "added!)\"\"\") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) if args.word2id: f =", "swda_name, ix, insert_ix)) return permutations def convert_dset(self, amounts): # create", "amount == 0: return [] permutations = [list(range(len(sents)))] amount =", "done in the glove wrapper of mtl_coherence.py else: word2id =", "train_size=0.8, random_state=seed) val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed) self.train_ixs,", "= self.da2num[insert_da] p_a = deepcopy(acts) p_a[insert_ix] = insert_da pa =", "supposed to be the dir with the respective train/test/val-dataset files", "speaker i += 1 segment_indices[j] = i return segment_indices def", "in pa]) pu = [utterances[i] for i in p] p_u", "_, swda_name = os.path.split(utt.swda_filename) swda_name = swda_name[:-4] if swda_name.endswith('.csv') else", "+= 1 self.da2num = switchboard_da_mapping() # CAUTION: make sure that", "os.makedirs(os.path.join(self.data_dir, 'train')) if not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not", "shuffle=True, train_size=0.8, random_state=seed) val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, 
random_state=seed)", "or speaker_ixs, they are something different, but the names are", "lambda x: [word2id_dict[y] for y in x] # don't convert", "= open(itos_file, \"w\") for (word, _) in cnt.most_common(25000): itosf.write(\"{}\\n\".format(word)) #getKeysByValue", "\"w\") as f: csv_writer = csv.writer(f) for perm in permuted_ixs:", "np.random.permutation(speaker_ix) new_sents = list(range(len(sents))) for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):", "tqdm import tqdm, trange import argparse import numpy as np", "segments = list(set(segm_ixs.values())) permutations = [] for i in range(amount):", "\" \".join([str(a) for a in acts]) u = str(tok_seqs) p_a", "prev_speaker = speaker i += 1 segment_indices[j] = i return", "alternatives: up (utterance permutation) us (utterance sampling) hup (half utterance", "[name, ix,insert_ix] csv_writer.writerow(row) else: for perm in segment_perms: csv_writer.writerow(perm) if", "the glove wrapper of mtl_coherence.py else: word2id = lambda x:", "act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir =", "data_dir self.tokenizer = tokenizer self.word2id = word2id self.task = task", "swda_name = os.path.split(utt.swda_filename) swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name", "\"w\") for (word, _) in cnt.most_common(25000): itosf.write(\"{}\\n\".format(word)) #getKeysByValue def swda_permute(self,", "= insert_sent if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i", "args.corpus == 'DailyDialog': converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task) converter.create_act_utt()", "act, swda_name, ix) = self.draw_rand_sent() insert_ix = random.choice(segments) permutations.append((sentence, act,", "self.perturbation_statistics = 0 self.setname = os.path.split(data_dir)[1] assert self.setname == 'train'", "i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)): utterances = [] acts = []", "which was included s.t. won't be generated def speaker_segment_ixs(self, speaker_ixs):", "p_u = str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded) class SwitchboardConverter: def __init__(self,", "return permutations[1:] def utterance_insertions(length, amount): possible_permutations = [] original =", "'us': permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts) elif self.task == 'hup': permuted_ixs", "permutation + utt_ixs if permutation not in permutations: break permutations.append(permutation)", "Transcript, Utterance act2word = {1:\"inform\",2:\"question\", 3:\"directive\", 4:\"commissive\"} def permute(sents, sent_DAs,", "self.swda_utterance_sampling(speaker_ixs, amounts) elif self.task == 'hup': permuted_ixs , segment_perms =", "task the dataset should be created. alternatives: up (utterance permutation)", "# don't convert words to ids (yet). 
It gets done", "self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag() if act == None: act", "self.utt_num += 1 self.trans_num = 0 for trans in self.corpus.iter_transcripts():", "\"aa\": 4, \"%-\": 5, \"ba\": 6, \"qy\": 7, \"x\": 8,", "assert len(sents) == len(sent_DAs), \"length of permuted sentences and list", "permutations = [] for _ in range(amount): i = random.randint(0,", "valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) def main():", "43, \"aap\": 44, \"am\": 45, \"^g\": 46, \"qw^d\": 47, \"fa\":", "for (word, _) in cnt.most_common(25000): itosf.write(\"{}\\n\".format(word)) #getKeysByValue def swda_permute(self, sents,", "i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in permuted_ixs:", "== 'hup': permuted_ixs = half_perturb(tok_seqs, acts, amounts) elif self.task ==", "generated def speaker_segment_ixs(self, speaker_ixs): i = 0 segment_indices = dict()", "_ in range(amounts): while True: # actually: do ... while", "i in range(amount): (sentence, act, swda_name, ix) = self.draw_rand_sent() insert_ix", "initialization\") parser.add_argument('--amount', type=int, default=20, help=\"random seed for initialization\") parser.add_argument('--word2id', action='store_true',", "+ segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:] permutation", "[] for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix)) permutation", "permuted_ixs = half_perturb(tok_seqs, acts, amounts) elif self.task == 'ui': permuted_ixs", "mapping_dict.items(): d[k] = v return d if __name__ == \"__main__\":", "ids (yet). It gets done in the glove wrapper of", "# [:-4] with open(shuffle_file, \"w\") as f: csv_writer = csv.writer(f)", "if act == \"+\": act = prev_act acts.append(self.da2num[act]) prev_act =", "= perm row = [name, ix,insert_ix] csv_writer.writerow(row) else: for perm", "= permute(tok_seqs, acts, amounts) elif self.task == 'us': permuted_ixs =", "acts = [int(act) for act in acts] if self.task ==", "be generated def draw_rand_sent(act_utt_df, sent_len, amount): \"\"\" df is supposed", "for act in acts] for utt_i, (act, utt) in enumerate(zip(acts,", "ix == length-1 else original[ix+1:]) ix_removed.insert(y, ix) possible_permutations.append(deepcopy(ix_removed)) permutations =", "import pandas as pd from math import factorial import random", "[] for segm_ix in new_segments: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation", "act in acts] for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):", "shuffle=True, train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs", "task the seed is the same s.t. 
the splits will", "prev_act acts.append(self.da2num[act]) prev_act = act if \"A\" in utt.caller: speaker_ixs.append(0)", "amount = min(amount, factorial(len(sents))-1) segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values()))", "= speaker_ixs[0] for j,speaker in enumerate(speaker_ixs): if speaker != prev_speaker:", "are too close if len(speaker_to_perm) < 2: return [] while", "= [] for _ in range(amount): i = random.randint(0, len(possible_permutations)-1)", "if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)): utterances", "def draw_rand_sent_from_df(df): ix = random.randint(0, len(df['utt'])-1) return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix],", "def swda_utterance_sampling(self, speaker_ixs, amount): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values()))", "permuted_ixs: a = \" \".join([str(x) for x in acts]) u", "in range(amount): while True: permutation = [] segm_perm = np.random.permutation(len(segments))", "train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs self.utt_da_pairs", "'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir, 'test')) trainfile = open(train_output_file,", "os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert os.path.isfile(dial_file) and os.path.isfile(act_file), \"could not find input", "in p_a]) p_u = deepcopy(utterances) p_u[insert_ix] = insert_sent if i", "sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation + utt_ixs if permutation not", "\"r\") word2id_dict = dict() for i, word in enumerate(f): word2id_dict[word[:-1].lower()]", "open(dial_file, 'r') af = open(act_file, 'r') of = open(self.output_file, 'w')", "\"\", utt.text) sentence = self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag() if", "str(utterances) insert_sent, insert_da, name, ix, insert_ix = p insert_da =", "% 2 == 0, segments)) speaker_orig = list(filter(lambda x: (x-speaker)", "return permutations[1:], segment_permutations def swda_utterance_insertion(self, speaker_ixs, amounts): segment_ixs = self.speaker_segment_ixs(speaker_ixs)", "for a in acts]) u = str(tok_seqs) p_a = deepcopy(acts)", "of DAs must be equal\" permutations = [list(range(len(sents)))] for _", "speaker_segment_ixs(self, speaker_ixs): i = 0 segment_indices = dict() prev_speaker =", "\"dialogues_act_{}.txt\".format(self.setname)) self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir = os.path.split(self.data_dir)[0] shuffled_path =", "SwitchboardConverter: def __init__(self, data_dir, tokenizer, word2id, task='', seed=42): self.corpus =", "= task self.ranking_dataset = ranking_dataset self.perturbation_statistics = 0 self.setname =", "2 == 0, range(len(sents)))) permuted_speaker_ix = np.random.permutation(speaker_ix) new_sents = list(range(len(sents)))", "(dial, act) in tqdm(enumerate(zip(df, af)), total=11118): seqs = dial.split('__eou__') seqs", "in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--datadir\",", "'us': for p in permuted_ixs: (insert_sent, insert_da, name, ix, insert_ix)", "'train', 'coherency_dset_{}.txt'.format(self.task)) 
val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task)) test_output_file = os.path.join(self.data_dir,", "= [] acts = [] speaker_ixs = [] prev_act =", "argparse import numpy as np import re import csv from", "speaker_ixs) elif self.task == 'us': permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts) elif", "permutation in permutations: permutations.append(permutation) break return permutations[1:], segment_permutations def swda_utterance_insertion(self,", "import factorial import random from collections import Counter, defaultdict import", "'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir, 'train')) if not", "permutations.append(new_sents) break return permutations[1:] def utterance_insertions(length, amount): possible_permutations = []", "\"o\": 17, \"bh\": 18, \"^q\": 19, \"bf\": 20, \"na\": 21,", "'r') of = open(self.output_file, 'w') discarded = 0 for line_count,", "tok_seqs = [self.word2id(seq) for seq in tok_seqs] acts = act.split('", "p_u = str(pu) if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if", "in sentence: cnt[w] += 1 itos_file = os.path.join(self.data_dir, \"itos.txt\") itosf", "dir name\" def create_act_utt(self): dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file =", "task self.utt_num = 0 for utt in self.corpus.iter_utterances(): self.utt_num +=", "amount, speaker_ixs): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) segment_permutations =", "word2id_dict = dict() for i, word in enumerate(f): word2id_dict[word[:-1].lower()] =", "train_test_split from swda.swda import CorpusReader, Transcript, Utterance act2word = {1:\"inform\",2:\"question\",", "factorial import random from collections import Counter, defaultdict import sys", "the constructor train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task)) val_output_file = os.path.join(self.data_dir,", "self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if", "open(shuffle_file, \"w\") as f: csv_writer = csv.writer(f) for perm in", "def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--datadir\", required=True, type=str, help=\"\"\"The input", "parser.add_argument('--task', required=True, type=str, default=\"up\", help=\"\"\"for which task the dataset should", "= min(amount, factorial(len(sents))-1) segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) for", "(act, utt, dialog_name,utt_i) csv_writer.writerow(row) def convert_dset(self, amounts): # data_dir is", "x in p_a]) p_u = deepcopy(utterances) p_u[insert_ix] = insert_sent if", "help=\"\"\"The input directory where the files of the corpus are", "= \"{}_{}\".format(self.setname, line_count) row = (act, utt, dialog_name,utt_i) csv_writer.writerow(row) def", "= i_from if (not new_sents == permutations[0]) and ( not", "\"w\") as f: csv_writer = csv.writer(f) if self.task == 'us':", "converter.create_act_utt() elif args.corpus == 'Switchboard': converter = SwitchboardConverter(args.datadir, tokenizer, 
word2id,", "val_ixs, test_ixs self.utt_da_pairs = [] prev_da = \"%\" for i,", "if ix == length-1 else original[ix+1:]) ix_removed.insert(y, ix) possible_permutations.append(deepcopy(ix_removed)) permutations", "= self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag() if act == None:", "self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) for i in range(amount): while True:", "0 segment_indices = dict() prev_speaker = speaker_ixs[0] for j,speaker in", "open(act_file, 'r') of = open(self.output_file, 'w') discarded = 0 for", "import word_tokenize from tqdm import tqdm, trange import argparse import", "i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) def main(): parser = argparse.ArgumentParser()", "range(amount): while True: permutation = [] segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm)", "== 'validation' or self.setname == 'test', \"wrong data dir name\"", "respective train/test/val-dataset files print(\"Creating {} perturbations for task {}\".format(amounts, self.task))", "p in permuted_ixs: a = \" \".join([str(a) for a in", "\"qy^d\": 16, \"o\": 17, \"bh\": 18, \"^q\": 19, \"bf\": 20,", "for i in range(amount): (sentence, act, swda_name, ix) = self.draw_rand_sent()", "37, \"t3\": 38, \"oo\": 39, \"co\": 40, \"cc\": 41, \"t1\":", "'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir, 'test')) trainfile", "'ui': permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts) swda_fname = os.path.split(trans.swda_filename)[1] shuffle_file", "< 5: continue tok_seqs = [self.tokenizer(seq) for seq in seqs]", "1 self.da2num = switchboard_da_mapping() # CAUTION: make sure that for", "names are too close if len(speaker_to_perm) < 2: return []", "self.task)) dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) self.output_file", "in range(amounts): while True: # actually: do ... while permutation", "testfile = open(test_output_file, 'w') shuffled_path = os.path.join(self.data_dir, \"shuffled_{}\".format(self.task)) if not", "should be created. 
alternatives: up (utterance permutation) us (utterance sampling)", "not in permutations: break permutations.append(permutation) return permutations[1:] , segment_permutations #the", "permuted_ixs: (insert_sent, insert_da, name, ix, insert_ix) = p a =", "list(set(segm_ixs.values())) segment_permutations = [] permutations = [list(segm_ixs.keys())] for _ in", "self.swda_utterance_insertion(speaker_ixs, amounts) swda_fname = os.path.split(trans.swda_filename)[1] shuffle_file = os.path.join(shuffled_path, swda_fname) #", "speaker == 0 : new_segments[::2] = permuted_speaker_ix new_segments[1::2] = speaker_orig", "if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i in self.val_ixs:", "\" \".join([str(x) for x in pa]) pu = [utterances[i] for", "\" \".join([str(a) for a in p_a]) p_u = deepcopy(tok_seqs) p_u[insert_ix]", "(half utterance petrurbation) ui (utterance insertion, nothing directly added!)\"\"\") args", "self.trans_num += 1 self.da2num = switchboard_da_mapping() # CAUTION: make sure", "test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir,", ", segment_perms = self.swda_permute(utterances, amounts, speaker_ixs) elif self.task == 'us':", "= permutation + utt_ixs if permutation not in permutations: permutations.append(permutation)", "self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) permutations = [] for i in", "wrapper of mtl_coherence.py else: word2id = lambda x: x tokenizer", "utt_ixs if permutation not in permutations: break permutations.append(permutation) return permutations[1:]", "dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) self.output_file =", "y in range(length): if ix == y: continue ix_removed =", "\"A\" in utt.caller: speaker_ixs.append(0) else: speaker_ixs.append(1) if self.task == 'up':", "os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir, 'test')) trainfile =", "(insert_sent, insert_da, name, ix, insert_ix) = p a = \"", "class DailyDialogConverter: def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset =", "args.corpus == 'Switchboard': converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)", "i in range(amount): while True: permutation = [] segm_perm =", "print(\"Creating Vocab file for Switchboard\") cnt = Counter() for utt", "self.task == 'us': permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts) elif self.task", "elif self.task == 'us': permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts) elif self.task", "class SwitchboardConverter: def __init__(self, data_dir, tokenizer, word2id, task='', seed=42): self.corpus", "\" \".join([str(x) for x in p_a]) p_u = deepcopy(utterances) p_u[insert_ix]", "for Switchboard\") cnt = Counter() for utt in self.corpus.iter_utterances(): sentence", "acts = act.split(' ') acts = acts[:-1] acts = [int(act)", "= deepcopy(acts) p_a[insert_ix] = insert_da pa = \" \".join([str(x) for", "'train' or self.setname == 'validation' or self.setname == 'test', \"wrong", "in p] p_a = \" \".join([str(a) for a in pa])", "= list(set(segm_ixs.values())) for i in range(amount): while 
True: permutation =", "= speaker_orig segment_permutations.append(new_segments) permutation = [] for segm_ix in new_segments:", "prev_da = \"%\" for i, utt in enumerate(self.corpus.iter_utterances()): sentence =", "random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs self.utt_da_pairs =", "# choose one of the speakers speaker_ix = list(filter(lambda x:", "in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))", "help=\"\"\"for which task the dataset should be created. alternatives: up", "\"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir", "40, \"cc\": 41, \"t1\": 42, \"bd\": 43, \"aap\": 44, \"am\":", "x in pa]) pu = [utterances[i] for i in p]", "SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements, valueToFind):", "insert_da pa = \" \".join([str(x) for x in p_a]) p_u", "= open(os.path.join(args.datadir, \"itos.txt\"), \"r\") word2id_dict = dict() for i, word", "\"aap\": 44, \"am\": 45, \"^g\": 46, \"qw^d\": 47, \"fa\": 48,", "word2id, args.task, args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements, valueToFind): listOfKeys =", "= open(dial_file, 'r') af = open(act_file, 'r') of = open(self.output_file,", "seed for initialization\") parser.add_argument('--word2id', action='store_true', help= \"convert the words to", "= self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) permutations = [] for i", "permuted_ixs = permute(tok_seqs, acts, amounts) elif self.task == 'us': permuted_ixs", "'validation', 'coherency_dset_{}.txt'.format(self.task)) test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir,", "equal\" if amount == 0: return [] permutations = [list(range(len(sents)))]", "segments)) speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0,", "amount, speaker_ixs): if amount == 0: return [] permutations =", "self.swda_half_perturb(amounts, speaker_ixs) elif self.task == 'ui': permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs,", "self.data_dir = data_dir self.tokenizer = tokenizer self.word2id = word2id self.task", "self.tokenizer = tokenizer self.word2id = word2id self.output_file = None self.task", "'validation' or self.setname == 'test', \"wrong data dir name\" def", "= os.path.split(data_dir)[1] assert self.setname == 'train' or self.setname == 'validation'", "\"ny\": 9, \"fc\": 10, \"%\": 11, \"qw\": 12, \"nn\": 13,", "name, ix, insert_ix = p insert_da = self.da2num[insert_da] p_a =", "else: word2id = lambda x: x tokenizer = word_tokenize if", "the names are too close if len(speaker_to_perm) < 2: return", "permute(tok_seqs, acts, amounts) elif self.task == 'us': permuted_ixs = draw_rand_sent(act_utt_df,", "list(range(len(sents))) for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix): new_sents[i_to] = i_from", "amount): possible_permutations = [] original = list(range(length)) for ix in", "if self.task 
== 'us': for p in permuted_ixs: a =", "acts]) u = str(tok_seqs) pa = [acts[i] for i in", "random.randint(0, len(possible_permutations)-1) permutations.append(possible_permutations[i]) return permutations class DailyDialogConverter: def __init__(self, data_dir,", "[] for _ in range(amount): i = random.randint(0, len(possible_permutations)-1) permutations.append(possible_permutations[i])", "speaker_ixs[0] for j,speaker in enumerate(speaker_ixs): if speaker != prev_speaker: prev_speaker", "= 0 for _ in range(amounts): while True: # actually:", "if self.task == 'us': for perm in permuted_ixs: (utt, da,", "seqs[:-1] if len(seqs) < 5: discarded += 1 continue tok_seqs", "os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir, 'train')) if", "utt being a sentence \"\"\" permutations = [] for _", "# choose one of the speakers speaker_to_perm = list(filter(lambda x:", "and utt being a sentence \"\"\" permutations = [] for", "list(set(segm_ixs.values())) permutations = [] for i in range(amount): (sentence, act,", "swda_permute(self, sents, amount, speaker_ixs): if amount == 0: return []", "31, \"br\": 32, \"no\": 33, \"fp\": 34, \"qrr\": 35, \"arp\":", "random.randint(0,1) # choose one of the speakers speaker_ix = list(filter(lambda", "swda_utterance_sampling(self, speaker_ixs, amount): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) permutations", "[] segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for segm_ix in segm_perm: utt_ixs", "utt.damsl_act_tag() if act == None: act = \"%\" if act", "acts.append(self.da2num[act]) prev_act = act if \"A\" in utt.caller: speaker_ixs.append(0) else:", "required=True, type=str, help=\"\"\"The input directory where the files of the", "ix, insert_ix = p insert_da = self.da2num[insert_da] p_a = deepcopy(acts)", "list(filter(lambda x: (x-speaker) % 2 == 0, segments)) speaker_orig =", "#getKeysByValue def swda_permute(self, sents, amount, speaker_ixs): if amount == 0:", "csv_writer.writerow(perm) self.perturbation_statistics += len(permuted_ixs) if self.task == 'us': for p", "as pd from math import factorial import random from collections", "the same! train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed) val_ixs,", "in permuted_ixs: a = \" \".join([str(x) for x in acts])", "cnt.most_common(25000): itosf.write(\"{}\\n\".format(word)) #getKeysByValue def swda_permute(self, sents, amount, speaker_ixs): if amount", "... while permutation not in permutations i_from = random.randint(0, len(segments)-1)", "be generated def speaker_segment_ixs(self, speaker_ixs): i = 0 segment_indices =", "close if len(speaker_to_perm) < 2: return [] while True: permuted_speaker_ix", "name, ix, sent_insert_ix)) return permutations def draw_rand_sent_from_df(df): ix = random.randint(0,", "= [] i = 0 for _ in range(amounts): while", "open(act_file, 'r') of = open(output_file, 'w') csv_writer = csv.writer(of, delimiter='|')", "new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents) break return", "( not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents)", "output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task)) df = open(dial_file, 'r') af =", "included s.t. 
won't be generated def draw_rand_sent(act_utt_df, sent_len, amount): \"\"\"", "= None self.task = task self.ranking_dataset = ranking_dataset self.perturbation_statistics =", "[] permutations = [list(range(len(sents)))] amount = min(amount, factorial(len(sents))-1) for i", "= ranking_dataset self.perturbation_statistics = 0 self.setname = os.path.split(data_dir)[1] assert self.setname", "if \"A\" in utt.caller: speaker_ixs.append(0) else: speaker_ixs.append(1) if self.task ==", "ix, sent_insert_ix)) return permutations def draw_rand_sent_from_df(df): ix = random.randint(0, len(df['utt'])-1)", "= i word2id = lambda x: [word2id_dict[y] for y in", "and list of DAs must be equal\" permutations = [list(range(len(sents)))]", "in acts] if self.task == 'up': permuted_ixs = permute(tok_seqs, acts,", "f = open(os.path.join(args.datadir, \"itos.txt\"), \"r\") word2id_dict = dict() for i,", "in original: for y in range(length): if ix == y:", "elif self.task == 'ui': permuted_ixs = utterance_insertions(len(tok_seqs), amounts) shuffle_file =", "= open(val_output_file, 'w') testfile = open(test_output_file, 'w') shuffled_path = os.path.join(self.data_dir,", "\"ft\":49 }) d = defaultdict(lambda: 11) for (k, v) in", "s.t. the splits will be the same! train_ixs, val_ixs =", "elif self.task == 'hup': permuted_ixs = half_perturb(tok_seqs, acts, amounts) elif", "% 2 != 0, segments)) #TODO: rename either speaker_ix or", "self.utt_da_pairs[r] def create_vocab(self): print(\"Creating Vocab file for Switchboard\") cnt =", "prev_speaker: prev_speaker = speaker i += 1 segment_indices[j] = i", "11) for (k, v) in mapping_dict.items(): d[k] = v return", "[] original = list(range(length)) for ix in original: for y", "deepcopy(utterances) p_u[insert_ix] = insert_sent if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))", "[int(act) for act in acts] for utt_i, (act, utt) in", "open(train_output_file, 'w') valfile = open(val_output_file, 'w') testfile = open(test_output_file, 'w')", "act in acts] if self.task == 'up': permuted_ixs = permute(tok_seqs,", "= str(utterances) insert_sent, insert_da, name, ix, insert_ix = p insert_da", "os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task)) df = open(dial_file, 'r')", "else: for p in permuted_ixs: a = \" \".join([str(x) for", "= os.path.join(shuffled_path, swda_fname) # [:-4] with open(shuffle_file, \"w\") as f:", "len(permuted_ixs) if self.task == 'us': for p in permuted_ixs: (insert_sent,", "os.path.join(data_dir, 'act_utt_name.txt') self.tokenizer = tokenizer self.word2id = word2id self.output_file =", "possible_permutations.append(deepcopy(ix_removed)) permutations = [] for _ in range(amount): i =", "for utt in self.corpus.iter_utterances(): self.utt_num += 1 self.trans_num = 0", "in trans.utterances: sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence))", "len(speaker_to_perm) < 2: return [] while True: permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()", "ix,insert_ix] csv_writer.writerow(row) else: for perm in segment_perms: csv_writer.writerow(perm) if self.task", "segment_permutations def swda_utterance_sampling(self, speaker_ixs, amount): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments =", "None: act = \"%\" if act == \"+\": act =", "= csv.writer(of, delimiter='|') for line_count, (dial, 
act) in tqdm(enumerate(zip(df, af)),", "insert_ix) = perm row = [name, ix,insert_ix] csv_writer.writerow(row) else: csv_writer.writerow(perm)", "return permutations class DailyDialogConverter: def __init__(self, data_dir, tokenizer, word2id, task='',", "for segm_ix in new_segments: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation =", "'coherency_dset_{}.txt'.format(self.task)) val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task)) test_output_file = os.path.join(self.data_dir, 'test',", "train/validation/test files. they'll correspond to the created # splits from", "convert words to ids (yet). It gets done in the", "= self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) for i in range(amount): while", "seed=42): self.corpus = CorpusReader(data_dir) self.data_dir = data_dir self.tokenizer = tokenizer", "from ast import literal_eval import pandas as pd from math", "for a in acts]) u = str(tok_seqs) pa = [acts[i]", "one of the speakers speaker_ix = list(filter(lambda x: (x-speaker) %", "self.swda_permute(utterances, amounts, speaker_ixs) elif self.task == 'us': permuted_ixs = self.swda_utterance_sampling(speaker_ixs,", "= [] for segm_ix in new_segments: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))", "\"length of permuted sentences and list of DAs must be", "'us': permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts) elif self.task == 'hup':", "dialog_name,utt_i) csv_writer.writerow(row) def convert_dset(self, amounts): # data_dir is supposed to", "self.test_ixs = train_ixs, val_ixs, test_ixs self.utt_da_pairs = [] prev_da =", "total=11118): seqs = dial.split('__eou__') seqs = seqs[:-1] if len(seqs) <", "swda_name, ix)) def draw_rand_sent(self): r = random.randint(0, len(self.utt_da_pairs)-1) return self.utt_da_pairs[r]", "open(val_output_file, 'w') testfile = open(test_output_file, 'w') shuffled_path = os.path.join(self.data_dir, \"shuffled_{}\".format(self.task))", "= sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation + utt_ixs if not", "self.task == 'hup': permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs) elif", "== 'Switchboard': converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed) converter.create_vocab()", "either speaker_ix or speaker_ixs, they are something different, but the", "') acts = acts[:-1] acts = [int(act) for act in", "permuted sentences and list of DAs must be equal\" if", "if amount == 0: return [] permutations = [list(range(len(sents)))] segment_permutations", "parser.add_argument(\"--datadir\", required=True, type=str, help=\"\"\"The input directory where the files of", "is supposed to be the dir with the respective train/test/val-dataset", "for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix)) permutation =", "in enumerate(zip(acts, tok_seqs)): dialog_name = \"{}_{}\".format(self.setname, line_count) row = (act,", "\"%\" if act == \"+\": act = prev_act acts.append(self.da2num[act]) prev_act", "= os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir = os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir, \"shuffled_{}\".format(self.task))", "\"ad\": 23, \"^2\": 24, \"b^m\": 25, \"qo\": 26, \"qh\": 27,", "segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for segm_ix in segm_perm: utt_ixs =", "'coherency_dset_{}.txt'.format(self.task)) test_output_file = 
os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir, 'train')):", "= \"%\" if act == \"+\": act = prev_act acts.append(self.da2num[act])", "equal\" permutations = [list(range(len(sents)))] for _ in range(amount): while True:", "i_from) in zip(speaker_ix, permuted_speaker_ix): new_sents[i_to] = i_from if (not new_sents", "gets done in the glove wrapper of mtl_coherence.py else: word2id", "len(seqs) < 5: discarded += 1 continue tok_seqs = [self.tokenizer(seq)", "random.randint(0, len(segments)-1) i_to = random.randint(0, len(segments)-2) segm_perm = deepcopy(segments) rem_elem", "task {}\".format(amounts, self.task)) dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir,", "= list() for item in dictOfElements.items(): if item[1] == valueToFind:", "testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--datadir\", required=True, type=str, help=\"\"\"The", "= [utterances[i] for i in p] p_u = str(pu) if", "the created # splits from the constructor train_output_file = os.path.join(self.data_dir,", "'train')): os.makedirs(os.path.join(self.data_dir, 'train')) if not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if", "\"itos.txt\") itosf = open(itos_file, \"w\") for (word, _) in cnt.most_common(25000):", "= str(utterances) pa = [acts[i] for i in p] p_a", "9, \"fc\": 10, \"%\": 11, \"qw\": 12, \"nn\": 13, \"bk\":", "new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix)) if speaker == 0 : new_segments[::2] =", "if not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')):", "is the original, which was included s.t. won't be generated", "trainfile = open(train_output_file, 'w') valfile = open(val_output_file, 'w') testfile =", "listOfKeys def switchboard_da_mapping(): mapping_dict = dict({ \"sd\": 1, \"b\": 2,", "random.choice(segments) permutations.append((sentence, act, swda_name, ix, insert_ix)) return permutations def convert_dset(self,", "if permutation not in permutations: permutations.append(permutation) segment_permutations.append(segm_perm) break return permutations,", "df['ix'][ix] def half_perturb(sents, sent_DAs, amount): assert len(sents) == len(sent_DAs), \"length", "'act' and 'utt' (utterance), with act being a number from", "act = utt.damsl_act_tag() if act == None: act = \"%\"", "draw_rand_sent_from_df(act_utt_df) sent_insert_ix = random.randint(0, sent_len-1) permutations.append((utt, da, name, ix, sent_insert_ix))", "i, utt in enumerate(self.corpus.iter_utterances()): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence", "not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir, 'test')) trainfile = open(train_output_file, 'w') valfile", "the files of the corpus are located. 
\"\"\") parser.add_argument(\"--corpus\", required=True,", "'w') csv_writer = csv.writer(of, delimiter='|') for line_count, (dial, act) in", "self.word2id = word2id self.task = task self.utt_num = 0 for", "to be the dir with the respective train/test/val-dataset files print(\"Creating", "= list(range(len(sents))) for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix): new_sents[i_to] =", "= 0 for utt in self.corpus.iter_utterances(): self.utt_num += 1 self.trans_num", "nltk import word_tokenize from tqdm import tqdm, trange import argparse", "sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation + utt_ixs if not permutation", "segm_perm[i_to:] permutation = [] for segm_ix in segm_perm: utt_ixs =", "insert_ix) = perm row = [name, ix,insert_ix] csv_writer.writerow(row) else: for", "self.task == 'up': permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)", "= \"%\" if act == \"+\": act = prev_da _,", "collections import Counter, defaultdict import sys from nltk import word_tokenize", "act == None: act = \"%\" if act == \"+\":", "amounts): # create distinct train/validation/test files. they'll correspond to the", "\"missing act_utt.txt in data_dir\" with open(self.act_utt_file, 'r') as f: act_utt_df", "+ utt_ixs if not permutation in permutations: permutations.append(permutation) break return", "perm row = [name, ix,insert_ix] csv_writer.writerow(row) else: csv_writer.writerow(perm) self.perturbation_statistics +=", "speaker_ixs = [] prev_act = \"%\" for utt in trans.utterances:", "= \"%\" for i, utt in enumerate(self.corpus.iter_utterances()): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\",", "in p] p_u = str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded) class SwitchboardConverter:", "p_a[insert_ix] = insert_da pa = \" \".join([str(a) for a in", "task=args.task) converter.create_act_utt() elif args.corpus == 'Switchboard': converter = SwitchboardConverter(args.datadir, tokenizer,", "permutations[0]) and ( not new_sents in permutations or len(permutations) >", "38, \"oo\": 39, \"co\": 40, \"cc\": 41, \"t1\": 42, \"bd\":", "be a pandas dataframe with colums 'act' and 'utt' (utterance),", "not find input files\" assert os.path.isfile(self.act_utt_file), \"missing act_utt.txt in data_dir\"", "sent_insert_ix = random.randint(0, sent_len-1) permutations.append((utt, da, name, ix, sent_insert_ix)) return", "amounts) elif self.task == 'ui': permuted_ixs = utterance_insertions(len(tok_seqs), amounts) shuffle_file", "same! train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed) val_ixs, test_ixs", "new_segments[::2] = permuted_speaker_ix new_segments[1::2] = speaker_orig else: new_segments[1::2] = permuted_speaker_ix", "amount == 0: return [] permutations = [list(range(len(sents)))] segment_permutations =", "x tokenizer = word_tokenize if args.corpus == 'DailyDialog': converter =", "= os.path.join(self.data_dir, \"itos.txt\") itosf = open(itos_file, \"w\") for (word, _)", "= [acts[i] for i in p] p_a = \" \".join([str(x)", "new_sents[i_to] = i_from if (not new_sents == permutations[0]) and (", "permuted_speaker_ix new_segments[::2] = speaker_orig segment_permutations.append(new_segments) permutation = [] for segm_ix", "s.t. 
won't be generated def speaker_segment_ixs(self, speaker_ixs): i = 0", "directly added!)\"\"\") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) if args.word2id: f", "tokenizer self.word2id = word2id self.output_file = None self.task = task", "ranking_dataset = True): self.data_dir = data_dir self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')", "possible amount of permutations, only the uniquely possible ones are", "w in utt] for utt in tok_seqs] tok_seqs = [self.word2id(seq)", "= np.random.permutation(len(sents)) permutations.append(permutation.tolist()) return permutations[1:] #the first one is the", "self.setname == 'test', \"wrong data dir name\" def create_act_utt(self): dial_file", "\" \".join([str(a) for a in pa]) pu = [tok_seqs[i] for", "\"\"\") parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--amount', type=int,", "'DailyDialog': converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task) converter.create_act_utt() elif args.corpus", "acts, amounts) elif self.task == 'ui': permuted_ixs = utterance_insertions(len(tok_seqs), amounts)", "46, \"qw^d\": 47, \"fa\": 48, \"ft\":49 }) d = defaultdict(lambda:", "csv_writer.writerow(row) else: csv_writer.writerow(perm) self.perturbation_statistics += len(permuted_ixs) if self.task == 'us':", "(utterance), with act being a number from 1 to 4", "\"arp\": 36, \"nd\": 37, \"t3\": 38, \"oo\": 39, \"co\": 40,", "== 'DailyDialog': converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task) converter.create_act_utt() elif", "in permutations: permutations.append(permutation) break return permutations[1:], segment_permutations def swda_utterance_insertion(self, speaker_ixs,", "28, \"ar\": 29, \"ng\": 30, \"nn^e\": 31, \"br\": 32, \"no\":", "in self.corpus.iter_transcripts(): self.trans_num += 1 self.da2num = switchboard_da_mapping() # CAUTION:", "permutation.tolist() in permutations: permutation = np.random.permutation(len(sents)) permutations.append(permutation.tolist()) return permutations[1:] #the", "\"b\": 2, \"sv\": 3, \"aa\": 4, \"%-\": 5, \"ba\": 6,", "= os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) output_file = os.path.join(self.data_dir,", "= tokenizer self.word2id = word2id self.output_file = None self.task =", "= open(act_file, 'r') of = open(output_file, 'w') csv_writer = csv.writer(of,", "== 'up': permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs) elif", "in zip(speaker_ix, permuted_speaker_ix): new_sents[i_to] = i_from if (not new_sents ==", "if item[1] == valueToFind: listOfKeys.append(item[0]) return listOfKeys def switchboard_da_mapping(): mapping_dict", "data_dir is supposed to be the dir with the respective", "won't be generated def speaker_segment_ixs(self, speaker_ixs): i = 0 segment_indices", ": new_segments[::2] = permuted_speaker_ix new_segments[1::2] = speaker_orig else: new_segments[1::2] =", "= [int(act) for act in acts] if self.task == 'up':", "\"bd\": 43, \"aap\": 44, \"am\": 45, \"^g\": 46, \"qw^d\": 47,", "amount is greater than the possible amount of permutations, only", "dataset should be created. 
alternatives: up (utterance permutation) us (utterance", "swda_utterance_insertion(self, speaker_ixs, amounts): segment_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segment_ixs.values())) segment_permutations", "help=\"random seed for initialization\") parser.add_argument('--word2id', action='store_true', help= \"convert the words", "segment_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segment_ixs.values())) segment_permutations = [] permutations", "== 0: return [] permutations = [list(range(len(sents)))] amount = min(amount,", "copy import deepcopy from ast import literal_eval import pandas as", "os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir, 'train')) if not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation'))", "numpy as np import re import csv from sklearn.model_selection import", "train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs,", "segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) permutations = [] for", "glove wrapper of mtl_coherence.py else: word2id = lambda x: x", "permutation) us (utterance sampling) hup (half utterance petrurbation) ui (utterance", "will be the same! train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8,", "= acts[:-1] acts = [int(act) for act in acts] for", "must be equal\" permutations = [list(range(len(sents)))] for _ in range(amount):", "= half_perturb(tok_seqs, acts, amounts) elif self.task == 'ui': permuted_ixs =", "permutations: permutations.append(permutation) segment_permutations.append(segm_perm) break return permutations, segment_permutations def swda_utterance_sampling(self, speaker_ixs,", "p_a[insert_ix] = insert_da pa = \" \".join([str(x) for x in", "word2id_dict[word[:-1].lower()] = i word2id = lambda x: [word2id_dict[y] for y", "acts, amounts) elif self.task == 'us': permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs),", "data_dir self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt') self.tokenizer = tokenizer self.word2id =", "= \" \".join([str(a) for a in p_a]) p_u = deepcopy(tok_seqs)", "\".join([str(a) for a in acts]) u = str(tok_seqs) p_a =", "for perm in permuted_ixs: if self.task == 'us': (utt, da,", "__init__(self, data_dir, tokenizer, word2id, task='', seed=42): self.corpus = CorpusReader(data_dir) self.data_dir", "= [] segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for segm_ix in segm_perm:", "while True: permutation = [] segm_perm = np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for", "sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.tokenizer(sentence) for w", "utt in tok_seqs] tok_seqs = [self.word2id(seq) for seq in tok_seqs]", "the same s.t. the splits will be the same! 
train_ixs,", "i_from if (not new_sents == permutations[0]) and ( not new_sents", "== 'us': for p in permuted_ixs: a = \" \".join([str(x)", "name, ix, insert_ix) = perm row = [name, ix,insert_ix] csv_writer.writerow(row)", "names=['act','utt','dialogue','ix']) rand_generator = lambda: draw_rand_sent_from_df(act_utt_df) df = open(dial_file, 'r') af", "sentence: cnt[w] += 1 itos_file = os.path.join(self.data_dir, \"itos.txt\") itosf =", "self.utt_da_pairs = [] prev_da = \"%\" for i, utt in", "= [] prev_da = \"%\" for i, utt in enumerate(self.corpus.iter_utterances()):", "speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))", "x: [word2id_dict[y] for y in x] # don't convert words", "speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))", "dir with the respective train/test/val-dataset files print(\"Creating {} perturbations for", "speaker_ixs.append(0) else: speaker_ixs.append(1) if self.task == 'up': permuted_ixs , segment_perms", "break permutations.append(permutation) return permutations[1:] , segment_permutations #the first one is", "ui (utterance insertion, nothing directly added!)\"\"\") args = parser.parse_args() random.seed(args.seed)", "is the same s.t. the splits will be the same!", "and their respective dialog acts \"\"\" \"\"\" if amount is", "i return segment_indices def swda_half_perturb(self, amount, speaker_ixs): segm_ixs = self.speaker_segment_ixs(speaker_ixs)", "swda_name ix = utt.utterance_index self.utt_da_pairs.append((sentence, act, swda_name, ix)) def draw_rand_sent(self):", "for _ in range(amount): i = random.randint(0, len(possible_permutations)-1) permutations.append(possible_permutations[i]) return", "not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents) break", "[] permutations = [list(range(len(sents)))] segment_permutations = [] amount = min(amount,", "in data_dir\" with open(self.act_utt_file, 'r') as f: act_utt_df = pd.read_csv(f,", "def half_perturb(sents, sent_DAs, amount): assert len(sents) == len(sent_DAs), \"length of", "6, \"qy\": 7, \"x\": 8, \"ny\": 9, \"fc\": 10, \"%\":", "permuted_speaker_ix): new_sents[i_to] = i_from if (not new_sents == permutations[0]) and", "= dial.split('__eou__') seqs = seqs[:-1] if len(seqs) < 5: continue", "permutations def draw_rand_sent_from_df(df): ix = random.randint(0, len(df['utt'])-1) return literal_eval(df['utt'][ix]), df['act'][ix],", "args.word2id: f = open(os.path.join(args.datadir, \"itos.txt\"), \"r\") word2id_dict = dict() for", "rand_generator = lambda: draw_rand_sent_from_df(act_utt_df) df = open(dial_file, 'r') af =", "row = [name, ix,insert_ix] csv_writer.writerow(row) else: csv_writer.writerow(perm) self.perturbation_statistics += len(permuted_ixs)", "{}\".format(amounts, self.task)) dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname))", "if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert os.path.isfile(dial_file) and os.path.isfile(act_file), \"could not", "generated def draw_rand_sent(act_utt_df, sent_len, amount): \"\"\" df is supposed to", "for w in utt] for utt in tok_seqs] tok_seqs =", "csv_writer.writerow(perm) if self.task == 'us': for p in permuted_ixs: a", "= [] for i in range(amount): (sentence, act, swda_name, ix)", "in permutations: permutations.append(permutation) segment_permutations.append(segm_perm) break return permutations, segment_permutations def 
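
# Editor's illustration (not part of the original pipeline): permute(),
# half_perturb() and utterance_insertions() all return *index orders* over the
# original utterances rather than text. A minimal sanity check for permute():
def _demo_permute():
    sents = [["hello"], ["how", "are", "you"], ["fine", "thanks"]]
    das = [1, 2, 1]
    perms = permute(sents, das, amount=2)
    # the identity order [0, 1, 2] is never returned
    assert all(p != list(range(len(sents))) for p in perms)
    return perms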

class DailyDialogConverter:
    def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset=True):
        self.data_dir = data_dir
        self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')

        self.tokenizer = tokenizer
        self.word2id = word2id
        self.output_file = None
        self.task = task
        self.ranking_dataset = ranking_dataset
        self.perturbation_statistics = 0

        self.setname = os.path.split(data_dir)[1]
        assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"

    def create_act_utt(self):
        dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
        act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
        output_file = os.path.join(self.data_dir, 'act_utt_name.txt')

        df = open(dial_file, 'r')
        af = open(act_file, 'r')
        of = open(output_file, 'w')
        csv_writer = csv.writer(of, delimiter='|')

        for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
            seqs = dial.split('__eou__')
            seqs = seqs[:-1]
            if len(seqs) < 5:
                continue

            tok_seqs = [self.tokenizer(seq) for seq in seqs]
            tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
            tok_seqs = [self.word2id(seq) for seq in tok_seqs]

            acts = act.split(' ')
            acts = acts[:-1]
            acts = [int(act) for act in acts]

            for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
                dialog_name = "{}_{}".format(self.setname, line_count)
                row = (act, utt, dialog_name, utt_i)
                csv_writer.writerow(row)

    def convert_dset(self, amounts):
        # data_dir is supposed to be the dir with the respective train/test/val-dataset files
        print("Creating {} perturbations for task {}".format(amounts, self.task))

        dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
        act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
        self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task))

        root_data_dir = os.path.split(self.data_dir)[0]
        shuffled_path = os.path.join(root_data_dir, "shuffled_{}".format(self.task))
        if not os.path.isdir(shuffled_path):
            os.mkdir(shuffled_path)

        assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
        assert os.path.isfile(self.act_utt_file), "missing act_utt_name.txt in data_dir"

        with open(self.act_utt_file, 'r') as f:
            act_utt_df = pd.read_csv(f, sep='|', names=['act', 'utt', 'dialogue', 'ix'])

        rand_generator = lambda: draw_rand_sent_from_df(act_utt_df)

        df = open(dial_file, 'r')
        af = open(act_file, 'r')
        of = open(self.output_file, 'w')
        discarded = 0

        for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
            seqs = dial.split('__eou__')
            seqs = seqs[:-1]
            if len(seqs) < 5:
                discarded += 1
                continue

            tok_seqs = [self.tokenizer(seq) for seq in seqs]
            tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
            tok_seqs = [self.word2id(seq) for seq in tok_seqs]

            acts = act.split(' ')
            acts = acts[:-1]
            acts = [int(act) for act in acts]

            if self.task == 'up':
                permuted_ixs = permute(tok_seqs, acts, amounts)
            elif self.task == 'us':
                permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)
            elif self.task == 'hup':
                permuted_ixs = half_perturb(tok_seqs, acts, amounts)
            elif self.task == 'ui':
                permuted_ixs = utterance_insertions(len(tok_seqs), amounts)

            shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".format(self.setname, line_count))
            with open(shuffle_file, "w") as f:
                csv_writer = csv.writer(f)
                for perm in permuted_ixs:
                    if self.task == 'us':
                        (utt, da, name, ix, insert_ix) = perm
                        row = [name, ix, insert_ix]
                        csv_writer.writerow(row)
                    else:
                        csv_writer.writerow(perm)

            self.perturbation_statistics += len(permuted_ixs)

            if self.task == 'us':
                for p in permuted_ixs:
                    (insert_sent, insert_da, name, ix, insert_ix) = p
                    a = " ".join([str(a) for a in acts])
                    u = str(tok_seqs)
                    p_a = deepcopy(acts)
                    p_a[insert_ix] = insert_da
                    pa = " ".join([str(a) for a in p_a])
                    p_u = deepcopy(tok_seqs)
                    p_u[insert_ix] = self.word2id(insert_sent)
                    of.write("{}|{}|{}|{}|{}\n".format("0", a, u, pa, p_u))
                    of.write("{}|{}|{}|{}|{}\n".format("1", pa, p_u, a, u))
            else:
                for p in permuted_ixs:
                    a = " ".join([str(a) for a in acts])
                    u = str(tok_seqs)
                    pa = [acts[i] for i in p]
                    p_a = " ".join([str(a) for a in pa])
                    pu = [tok_seqs[i] for i in p]
                    p_u = str(pu)
                    of.write("{}|{}|{}|{}|{}\n".format("0", a, u, p_a, p_u))
                    of.write("{}|{}|{}|{}|{}\n".format("1", p_a, p_u, a, u))

        print(discarded)
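
# Editor's note (illustrative sketch, assuming the write format above and no '|'
# inside utterances): each line of coherency_dset_<task>.txt is
# "label|acts|utts|perturbed_acts|perturbed_utts", where a "0" line pairs the
# original dialog with its perturbation and the following "1" line swaps the two
# sides. A minimal reader for one such line could look like this:
def _read_coherency_line(line):
    label, acts, utts, p_acts, p_utts = line.rstrip("\n").split("|")
    return int(label), acts.split(" "), literal_eval(utts), p_acts.split(" "), literal_eval(p_utts)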

class SwitchboardConverter:
    def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
        self.corpus = CorpusReader(data_dir)
        self.data_dir = data_dir
        self.tokenizer = tokenizer
        self.word2id = word2id
        self.task = task

        self.utt_num = 0
        for utt in self.corpus.iter_utterances():
            self.utt_num += 1

        self.trans_num = 0
        for trans in self.corpus.iter_transcripts():
            self.trans_num += 1

        self.da2num = switchboard_da_mapping()

        # CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
        train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
        val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
        self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs

        self.utt_da_pairs = []
        prev_da = "%"
        for i, utt in enumerate(self.corpus.iter_utterances()):
            sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text)
            sentence = self.word2id(self.tokenizer(sentence))
            act = utt.damsl_act_tag()
            if act == None:
                act = "%"
            if act == "+":
                act = prev_da

            _, swda_name = os.path.split(utt.swda_filename)
            swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name

            ix = utt.utterance_index
            self.utt_da_pairs.append((sentence, act, swda_name, ix))
            prev_da = act

    def draw_rand_sent(self):
        r = random.randint(0, len(self.utt_da_pairs) - 1)
        return self.utt_da_pairs[r]

    def create_vocab(self):
        print("Creating Vocab file for Switchboard")

        cnt = Counter()
        for utt in self.corpus.iter_utterances():
            sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text)
            sentence = self.tokenizer(sentence)
            for w in sentence:
                cnt[w] += 1

        itos_file = os.path.join(self.data_dir, "itos.txt")
        itosf = open(itos_file, "w")
        for (word, _) in cnt.most_common(25000):
            itosf.write("{}\n".format(word))

    def swda_permute(self, sents, amount, speaker_ixs):
        if amount == 0:
            return []

        permutations = [list(range(len(sents)))]
        segment_permutations = []
        amount = min(amount, factorial(len(sents)) - 1)
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))

        for i in range(amount):
            while True:
                permutation = []
                segm_perm = np.random.permutation(len(segments))
                segment_permutations.append(segm_perm)
                for segm_ix in segm_perm:
                    utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
                    permutation = permutation + utt_ixs
                if not permutation in permutations:
                    permutations.append(permutation)
                    break

        return permutations[1:], segment_permutations  # the first one is the original, which was included s.t. it won't be generated

    def swda_utterance_insertion(self, speaker_ixs, amounts):
        segment_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segment_ixs.values()))
        segment_permutations = []
        permutations = []

        for _ in range(amounts):
            while True:  # actually: do ... while permutation not in permutations
                i_from = random.randint(0, len(segments) - 1)
                i_to = random.randint(0, len(segments) - 2)
                segm_perm = deepcopy(segments)
                rem_elem = segments[i_from]
                segm_perm = segm_perm[0:i_from] + segm_perm[i_from + 1:]
                segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]

                permutation = []
                for segm_ix in segm_perm:
                    utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))
                    permutation = permutation + utt_ixs

                if permutation not in permutations:
                    break

            permutations.append(permutation)
            segment_permutations.append(segm_perm)

        return permutations, segment_permutations

    def speaker_segment_ixs(self, speaker_ixs):
        i = 0
        segment_indices = dict()
        prev_speaker = speaker_ixs[0]
        for j, speaker in enumerate(speaker_ixs):
            if speaker != prev_speaker:
                prev_speaker = speaker
                i += 1
            segment_indices[j] = i
        return segment_indices

    def swda_half_perturb(self, amount, speaker_ixs):
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))
        segment_permutations = []

        permutations = [list(segm_ixs.keys())]
        for _ in range(amount):
            speaker = random.randint(0, 1)  # choose one of the speakers
            speaker_to_perm = list(filter(lambda x: (x - speaker) % 2 != 0, segments))
            speaker_orig = list(filter(lambda x: (x - speaker) % 2 == 0, segments))
            # TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close

            if len(speaker_to_perm) < 2:
                return []

            while True:
                permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()

                new_segments = [None] * (len(speaker_orig) + len(permuted_speaker_ix))
                if speaker == 0:
                    new_segments[::2] = permuted_speaker_ix
                    new_segments[1::2] = speaker_orig
                else:
                    new_segments[1::2] = permuted_speaker_ix
                    new_segments[::2] = speaker_orig

                permutation = []
                for segm_ix in new_segments:
                    utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
                    permutation = permutation + utt_ixs

                if permutation not in permutations:
                    permutations.append(permutation)
                    segment_permutations.append(new_segments)
                    break

        return permutations, segment_permutations

    def swda_utterance_sampling(self, speaker_ixs, amount):
        segm_ixs = self.speaker_segment_ixs(speaker_ixs)
        segments = list(set(segm_ixs.values()))

        permutations = []
        for i in range(amount):
            (sentence, act, swda_name, ix) = self.draw_rand_sent()
            insert_ix = random.choice(segments)
            permutations.append((sentence, act, swda_name, ix, insert_ix))

        return permutations

    def convert_dset(self, amounts):
        # create distinct train/validation/test files. they'll correspond to the created
        # splits from the constructor
        train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))
        val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task))
        test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task))
        if not os.path.exists(os.path.join(self.data_dir, 'train')):
            os.makedirs(os.path.join(self.data_dir, 'train'))
        if not os.path.exists(os.path.join(self.data_dir, 'validation')):
            os.makedirs(os.path.join(self.data_dir, 'validation'))
        if not os.path.exists(os.path.join(self.data_dir, 'test')):
            os.makedirs(os.path.join(self.data_dir, 'test'))

        trainfile = open(train_output_file, 'w')
        valfile = open(val_output_file, 'w')
        testfile = open(test_output_file, 'w')

        shuffled_path = os.path.join(self.data_dir, "shuffled_{}".format(self.task))
        if not os.path.isdir(shuffled_path):
            os.mkdir(shuffled_path)

        for i, trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
            utterances = []
            acts = []
            speaker_ixs = []
            prev_act = "%"
            for utt in trans.utterances:
                sentence = re.sub(r"([+/\}\[\]]|\{\w)", "", utt.text)
                sentence = self.word2id(self.tokenizer(sentence))
                utterances.append(sentence)
                act = utt.damsl_act_tag()
                if act == None:
                    act = "%"
                if act == "+":
                    act = prev_act
                acts.append(self.da2num[act])
                prev_act = act
                if "A" in utt.caller:
                    speaker_ixs.append(0)
                else:
                    speaker_ixs.append(1)

            if self.task == 'up':
                permuted_ixs, segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
            elif self.task == 'us':
                permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
            elif self.task == 'hup':
                permuted_ixs, segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
            elif self.task == 'ui':
                permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)

            swda_fname = os.path.split(trans.swda_filename)[1]
            shuffle_file = os.path.join(shuffled_path, swda_fname)  # [:-4]
            with open(shuffle_file, "w") as f:
                csv_writer = csv.writer(f)
                if self.task == 'us':
                    for perm in permuted_ixs:
                        (utt, da, name, ix, insert_ix) = perm
                        row = [name, ix, insert_ix]
                        csv_writer.writerow(row)
                else:
                    for perm in segment_perms:
                        csv_writer.writerow(perm)

            if self.task == 'us':
                for p in permuted_ixs:
                    (insert_sent, insert_da, name, ix, insert_ix) = p
                    a = " ".join([str(x) for x in acts])
                    u = str(utterances)
                    insert_da = self.da2num[insert_da]
                    p_a = deepcopy(acts)
                    p_a[insert_ix] = insert_da
                    pa = " ".join([str(x) for x in p_a])
                    p_u = deepcopy(utterances)
                    p_u[insert_ix] = insert_sent
                    if i in self.train_ixs:
                        trainfile.write("{}|{}|{}|{}|{}\n".format("0", a, u, pa, p_u))
                        trainfile.write("{}|{}|{}|{}|{}\n".format("1", pa, p_u, a, u))
                    if i in self.val_ixs:
                        valfile.write("{}|{}|{}|{}|{}\n".format("0", a, u, pa, p_u))
                        valfile.write("{}|{}|{}|{}|{}\n".format("1", pa, p_u, a, u))
                    if i in self.test_ixs:
                        testfile.write("{}|{}|{}|{}|{}\n".format("0", a, u, pa, p_u))
                        testfile.write("{}|{}|{}|{}|{}\n".format("1", pa, p_u, a, u))
            else:
                for p in permuted_ixs:
                    a = " ".join([str(x) for x in acts])
                    u = str(utterances)
                    pa = [acts[i] for i in p]
                    p_a = " ".join([str(x) for x in pa])
                    pu = [utterances[i] for i in p]
                    p_u = str(pu)
                    if i in self.train_ixs:
                        trainfile.write("{}|{}|{}|{}|{}\n".format("0", a, u, p_a, p_u))
                        trainfile.write("{}|{}|{}|{}|{}\n".format("1", p_a, p_u, a, u))
                    if i in self.val_ixs:
                        valfile.write("{}|{}|{}|{}|{}\n".format("0", a, u, p_a, p_u))
                        valfile.write("{}|{}|{}|{}|{}\n".format("1", p_a, p_u, a, u))
                    if i in self.test_ixs:
                        testfile.write("{}|{}|{}|{}|{}\n".format("0", a, u, p_a, p_u))
                        testfile.write("{}|{}|{}|{}|{}\n".format("1", p_a, p_u, a, u))
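
# Editor's illustration (not part of the original script): switchboard_da_mapping()
# below returns a defaultdict, so any dialog act tag that is not listed explicitly
# falls back to 11, the id assigned to the "%" tag.
def _demo_da_mapping():
    da2num = switchboard_da_mapping()
    assert da2num["sd"] == 1
    assert da2num["some-unknown-tag"] == 11
    return da2num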

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--datadir",
                        required=True,
                        type=str,
                        help="""The input directory where the files of the corpus are located. """)
    parser.add_argument("--corpus",
                        required=True,
                        type=str,
                        help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--amount',
                        type=int,
                        default=20,
                        help="amount of perturbations to create per dialogue")
    parser.add_argument('--word2id',
                        action='store_true',
                        help="convert the words to ids")
    parser.add_argument('--task',
                        required=True,
                        type=str,
                        default="up",
                        help="""for which task the dataset should be created.
                                alternatives: up (utterance permutation)
                                              us (utterance sampling)
                                              hup (half utterance perturbation)
                                              ui (utterance insertion, nothing directly added!)""")
    args = parser.parse_args()

    random.seed(args.seed)
    np.random.seed(args.seed)

    if args.word2id:
        f = open(os.path.join(args.datadir, "itos.txt"), "r")
        word2id_dict = dict()
        for i, word in enumerate(f):
            word2id_dict[word[:-1].lower()] = i
        word2id = lambda x: [word2id_dict[y] for y in x]
    else:
        # don't convert words to ids (yet). It gets done in the glove wrapper of mtl_coherence.py
        word2id = lambda x: x

    tokenizer = word_tokenize

    if args.corpus == 'DailyDialog':
        converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
        converter.create_act_utt()
    elif args.corpus == 'Switchboard':
        converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task)

    converter.convert_dset(amounts=args.amount)


def getKeysByValue(dictOfElements, valueToFind):
    listOfKeys = list()
    for item in dictOfElements.items():
        if item[1] == valueToFind:
            listOfKeys.append(item[0])
    return listOfKeys


def switchboard_da_mapping():
    mapping_dict = dict({
        "sd": 1, "b": 2, "sv": 3, "aa": 4, "%-": 5, "ba": 6, "qy": 7, "x": 8, "ny": 9, "fc": 10,
        "%": 11, "qw": 12, "nn": 13, "bk": 14, "h": 15, "qy^d": 16, "o": 17, "bh": 18, "^q": 19, "bf": 20,
        "na": 21, "ny^e": 22, "ad": 23, "^2": 24, "b^m": 25, "qo": 26, "qh": 27, "^h": 28, "ar": 29, "ng": 30,
        "nn^e": 31, "br": 32, "no": 33, "fp": 34, "qrr": 35, "arp": 36, "nd": 37, "t3": 38, "oo": 39, "co": 40,
        "cc": 41, "t1": 42, "bd": 43, "aap": 44, "am": 45, "^g": 46, "qw^d": 47, "fa": 48, "ft": 49
    })
    d = defaultdict(lambda: 11)
    for (k, v) in mapping_dict.items():
        d[k] = v
    return d


if __name__ == "__main__":
    main()
pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix']) rand_generator", "act, swda_name, ix, insert_ix)) return permutations def convert_dset(self, amounts): #", "= lambda x: [word2id_dict[y] for y in x] # don't", "in cnt.most_common(25000): itosf.write(\"{}\\n\".format(word)) #getKeysByValue def swda_permute(self, sents, amount, speaker_ixs): if", "trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i", "pa = \" \".join([str(x) for x in p_a]) p_u =", "'w') shuffled_path = os.path.join(self.data_dir, \"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) for", "speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents))))", "[] for _ in range(amount): (utt, da, name, ix) =", "sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence)) act =", "hup (half utterance petrurbation) ui (utterance insertion, nothing directly added!)\"\"\")", "being a sentence \"\"\" permutations = [] for _ in", "for _ in range(amounts): while True: # actually: do ...", "amounts) elif self.task == 'us': permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)", "= os.path.join(root_data_dir, \"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert os.path.isfile(dial_file) and", "random.randint(0, len(segments)-2) segm_perm = deepcopy(segments) rem_elem = segments[i_from] segm_perm =", "'ui': permuted_ixs = utterance_insertions(len(tok_seqs), amounts) shuffle_file = os.path.join(shuffled_path, \"{}_{}.csv\".format(self.setname, line_count))", "= self.speaker_segment_ixs(speaker_ixs) segments = list(set(segment_ixs.values())) segment_permutations = [] permutations =", "speaker_ixs, amount): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) permutations =", "word2id self.output_file = None self.task = task self.ranking_dataset = ranking_dataset", "as f: csv_writer = csv.writer(f) if self.task == 'us': for", "(yet). It gets done in the glove wrapper of mtl_coherence.py", "else original[ix+1:]) ix_removed.insert(y, ix) possible_permutations.append(deepcopy(ix_removed)) permutations = [] for _", "df is supposed to be a pandas dataframe with colums", "= permuted_speaker_ix new_segments[::2] = speaker_orig segment_permutations.append(new_segments) permutation = [] for", "< 2: return [] while True: permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist() new_segments", "sentences and list of DAs must be equal\" permutations =", "f: csv_writer = csv.writer(f) for perm in permuted_ixs: if self.task", "p] p_a = \" \".join([str(x) for x in pa]) pu", "\"\"\" df is supposed to be a pandas dataframe with", "permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist() new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix)) if speaker == 0", "to be a pandas dataframe with colums 'act' and 'utt'", "different! 
permuted sentences and their respective dialog acts \"\"\" \"\"\"", "= act if \"A\" in utt.caller: speaker_ixs.append(0) else: speaker_ixs.append(1) if", "[] prev_act = \"%\" for utt in trans.utterances: sentence =", "= data_dir self.tokenizer = tokenizer self.word2id = word2id self.task =", "x: x tokenizer = word_tokenize if args.corpus == 'DailyDialog': converter", "import numpy as np import re import csv from sklearn.model_selection", "\" \".join([str(x) for x in acts]) u = str(utterances) insert_sent,", "original = list(range(length)) for ix in original: for y in", "\"qo\": 26, \"qh\": 27, \"^h\": 28, \"ar\": 29, \"ng\": 30,", "u = str(tok_seqs) pa = [acts[i] for i in p]", "\"fp\": 34, \"qrr\": 35, \"arp\": 36, \"nd\": 37, \"t3\": 38,", "re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.tokenizer(sentence) for w in sentence:", "p] p_u = str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded) class SwitchboardConverter: def", "for _ in range(amount): (utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)", "os.makedirs(os.path.join(self.data_dir, 'test')) trainfile = open(train_output_file, 'w') valfile = open(val_output_file, 'w')", "permutations = [] for _ in range(amount): (utt, da, name,", "only the uniquely possible ones are returned \"\"\" assert len(sents)", "act = prev_act acts.append(self.da2num[act]) prev_act = act if \"A\" in", "= word_tokenize if args.corpus == 'DailyDialog': converter = DailyDialogConverter(args.datadir, tokenizer,", "permutations i_from = random.randint(0, len(segments)-1) i_to = random.randint(0, len(segments)-2) segm_perm", "dataframe with colums 'act' and 'utt' (utterance), with act being", "swda_name.endswith('.csv') else swda_name ix = utt.utterance_index self.utt_da_pairs.append((sentence, act, swda_name, ix))", "\"x\": 8, \"ny\": 9, \"fc\": 10, \"%\": 11, \"qw\": 12,", "self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt') self.tokenizer = tokenizer self.word2id = word2id", "words to ids\") parser.add_argument('--task', required=True, type=str, default=\"up\", help=\"\"\"for which task", "= np.random.permutation(len(segments)) segment_permutations.append(segm_perm) for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segm_ixs,", "ranking_dataset self.perturbation_statistics = 0 self.setname = os.path.split(data_dir)[1] assert self.setname ==", "str(tok_seqs) pa = [acts[i] for i in p] p_a =", "p a = \" \".join([str(a) for a in acts]) u", "a in acts]) u = str(tok_seqs) p_a = deepcopy(acts) p_a[insert_ix]", "with colums 'act' and 'utt' (utterance), with act being a", "do ... 
while permutation not in permutations i_from = random.randint(0,", "math import factorial import random from collections import Counter, defaultdict", "the words to ids\") parser.add_argument('--task', required=True, type=str, default=\"up\", help=\"\"\"for which", "csv.writer(f) for perm in permuted_ixs: if self.task == 'us': (utt,", "= self.draw_rand_sent() insert_ix = random.choice(segments) permutations.append((sentence, act, swda_name, ix, insert_ix))", "input directory where the files of the corpus are located.", "= segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:] permutation = [] for", "= self.swda_half_perturb(amounts, speaker_ixs) elif self.task == 'ui': permuted_ixs, segment_perms =", "in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))", "was included s.t. won't be generated def draw_rand_sent(act_utt_df, sent_len, amount):", "return permutations, segment_permutations def swda_utterance_sampling(self, speaker_ixs, amount): segm_ixs = self.speaker_segment_ixs(speaker_ixs)", "for task {}\".format(amounts, self.task)) dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file =", "type=str, help=\"\"\"the name of the corpus to use, currently either", "\"no\": 33, \"fp\": 34, \"qrr\": 35, \"arp\": 36, \"nd\": 37,", "'train')) if not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir,", "continue tok_seqs = [self.tokenizer(seq) for seq in seqs] tok_seqs =", "def create_act_utt(self): dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname))", "os.path.isdir(shuffled_path): os.mkdir(shuffled_path) for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)): utterances = []", "sentences and their respective dialog acts \"\"\" \"\"\" if amount", "their respective dialog acts \"\"\" \"\"\" if amount is greater", "for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)): utterances = [] acts =", "\"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir, \"dialogues_act_{}.txt\".format(self.setname)) output_file = os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task)) df", "return permutations[1:] , segment_permutations #the first one is the original,", "don't convert words to ids (yet). 
It gets done in", "seqs[:-1] if len(seqs) < 5: continue tok_seqs = [self.tokenizer(seq) for", "deepcopy(segments) rem_elem = segments[i_from] segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm", "permutations[1:] #the first one is the original, which was included", "def __init__(self, data_dir, tokenizer, word2id, task='', seed=42): self.corpus = CorpusReader(data_dir)", "os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir, \"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert", "= Counter() for utt in self.corpus.iter_utterances(): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\",", "x in acts]) u = str(utterances) insert_sent, insert_da, name, ix,", "= parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) if args.word2id: f = open(os.path.join(args.datadir, \"itos.txt\"),", "i in p] p_a = \" \".join([str(x) for x in", "'coherency_dset_{}.txt'.format(self.task)) root_data_dir = os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir, \"shuffled_{}\".format(self.task)) if not", "= [] for _ in range(amount): (utt, da, name, ix)", "= deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p", "permutations[1:] , segment_permutations #the first one is the original, which", "def utterance_insertions(length, amount): possible_permutations = [] original = list(range(length)) for", "if speaker != prev_speaker: prev_speaker = speaker i += 1", "= open(self.output_file, 'w') discarded = 0 for line_count, (dial, act)", "= segments[i_from] segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to]", "os.path.join(self.data_dir, 'act_utt_name.txt'.format(self.task)) df = open(dial_file, 'r') af = open(act_file, 'r')", "= permutation + utt_ixs if not permutation in permutations: permutations.append(permutation)", "be created. alternatives: up (utterance permutation) us (utterance sampling) hup", "= acts[:-1] acts = [int(act) for act in acts] if", "14, \"h\": 15, \"qy^d\": 16, \"o\": 17, \"bh\": 18, \"^q\":", "!= 0, segments)) #TODO: rename either speaker_ix or speaker_ixs, they", "the original, which was included s.t. 
won't be generated def", "p_a = \" \".join([str(a) for a in pa]) pu =", "= \"%\" for utt in trans.utterances: sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\",", "a = \" \".join([str(x) for x in acts]) u =", "elif self.task == 'us': permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts) elif", "segment_perms = self.swda_permute(utterances, amounts, speaker_ixs) elif self.task == 'us': permuted_ixs", "= argparse.ArgumentParser() parser.add_argument(\"--datadir\", required=True, type=str, help=\"\"\"The input directory where the", "task='', seed=42): self.corpus = CorpusReader(data_dir) self.data_dir = data_dir self.tokenizer =", "ast import literal_eval import pandas as pd from math import", "of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in permuted_ixs: a = \" \".join([str(a)", "= [] original = list(range(length)) for ix in original: for", "permutations = [list(range(len(sents)))] for _ in range(amount): while True: speaker", "= np.random.permutation(speaker_ix) new_sents = list(range(len(sents))) for (i_to, i_from) in zip(speaker_ix,", "ix = random.randint(0, len(df['utt'])-1) return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix] def", "of = open(self.output_file, 'w') discarded = 0 for line_count, (dial,", "from swda.swda import CorpusReader, Transcript, Utterance act2word = {1:\"inform\",2:\"question\", 3:\"directive\",", "self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs self.utt_da_pairs = [] prev_da", "or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents) break return permutations[1:] def utterance_insertions(length,", "utt, dialog_name,utt_i) csv_writer.writerow(row) def convert_dset(self, amounts): # data_dir is supposed", "enumerate(zip(acts, tok_seqs)): dialog_name = \"{}_{}\".format(self.setname, line_count) row = (act, utt,", "for _ in range(amount): while True: speaker = random.randint(0,1) #", "random_state=seed) val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs,", "sent_DAs, amount): assert len(sents) == len(sent_DAs), \"length of permuted sentences", "self.task = task self.utt_num = 0 for utt in self.corpus.iter_utterances():", "valueToFind: listOfKeys.append(item[0]) return listOfKeys def switchboard_da_mapping(): mapping_dict = dict({ \"sd\":", "_ in range(amount): i = random.randint(0, len(possible_permutations)-1) permutations.append(possible_permutations[i]) return permutations", "len(sent_DAs), \"length of permuted sentences and list of DAs must", "27, \"^h\": 28, \"ar\": 29, \"ng\": 30, \"nn^e\": 31, \"br\":", "tokenizer, word2id, args.task, args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements, valueToFind): listOfKeys", "of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded) class SwitchboardConverter: def __init__(self, data_dir, tokenizer, word2id, task='',", "\"\"\" assert len(sents) == len(sent_DAs), \"length of permuted sentences and", "self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs self.utt_da_pairs = []", "swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name ix = utt.utterance_index", "permutations: break permutations.append(permutation) return permutations[1:] , segment_permutations #the first one", "os.path.split(utt.swda_filename) swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name ix =", "dial.split('__eou__') seqs = seqs[:-1] if 
len(seqs) < 5: continue tok_seqs", "in range(length): if ix == y: continue ix_removed = original[0:ix]", "CorpusReader(data_dir) self.data_dir = data_dir self.tokenizer = tokenizer self.word2id = word2id", "= os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task)) test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if", "= list(set(segment_ixs.values())) segment_permutations = [] permutations = [] i =", "insert_ix = random.choice(segments) permutations.append((sentence, act, swda_name, ix, insert_ix)) return permutations", "os.path.isfile(self.act_utt_file), \"missing act_utt.txt in data_dir\" with open(self.act_utt_file, 'r') as f:", "DailyDialogConverter: def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):", "segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) for i in range(amount):", "if i in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i in self.test_ixs:", "sent_len, amount): \"\"\" df is supposed to be a pandas", "to 4 and utt being a sentence \"\"\" permutations =", "csv.writer(f) if self.task == 'us': for perm in permuted_ixs: (utt,", "if i in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.test_ixs:", "permuted_ixs: (utt, da, name, ix, insert_ix) = perm row =", "# actually: do ... while permutation not in permutations i_from", "= list(range(length)) for ix in original: for y in range(length):", "0: return [] permutations = [list(range(len(sents)))] segment_permutations = [] amount", "1 segment_indices[j] = i return segment_indices def swda_half_perturb(self, amount, speaker_ixs):", "import math import os from copy import deepcopy from ast", "def getKeysByValue(dictOfElements, valueToFind): listOfKeys = list() for item in dictOfElements.items():", "possible ones are returned \"\"\" assert len(sents) == len(sent_DAs), \"length", "val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs", "for j,speaker in enumerate(speaker_ixs): if speaker != prev_speaker: prev_speaker =", "f: csv_writer = csv.writer(f) if self.task == 'us': for perm", "= random.randint(0, sent_len-1) permutations.append((utt, da, name, ix, sent_insert_ix)) return permutations", "# splits from the constructor train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))", "self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) def", "total=1155)): utterances = [] acts = [] speaker_ixs = []", "if permutation not in permutations: break permutations.append(permutation) return permutations[1:] ,", "os.path.join(shuffled_path, \"{}_{}.csv\".format(self.setname, line_count)) with open(shuffle_file, \"w\") as f: csv_writer =", "amounts) elif self.task == 'hup': permuted_ixs = half_perturb(tok_seqs, acts, amounts)", "enumerate(self.corpus.iter_utterances()): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence)) act", "18, \"^q\": 19, \"bf\": 20, \"na\": 21, \"ny^e\": 22, 
\"ad\":", "np.random.permutation(len(sents)) permutations.append(permutation.tolist()) return permutations[1:] #the first one is the original,", "break return permutations[1:], segment_permutations def swda_utterance_insertion(self, speaker_ixs, amounts): segment_ixs =", "cnt[w] += 1 itos_file = os.path.join(self.data_dir, \"itos.txt\") itosf = open(itos_file,", "are something different, but the names are too close if", "self.task == 'hup': permuted_ixs = half_perturb(tok_seqs, acts, amounts) elif self.task", "= re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.tokenizer(sentence) for w in", "utt in self.corpus.iter_utterances(): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence =", "in enumerate(self.corpus.iter_utterances()): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence))", "testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in permuted_ixs: a = \" \".join([str(x)", "5: discarded += 1 continue tok_seqs = [self.tokenizer(seq) for seq", "words to ids (yet). It gets done in the glove", "21, \"ny^e\": 22, \"ad\": 23, \"^2\": 24, \"b^m\": 25, \"qo\":", "shuffle_file = os.path.join(shuffled_path, \"{}_{}.csv\".format(self.setname, line_count)) with open(shuffle_file, \"w\") as f:", "0 for _ in range(amounts): while True: # actually: do", "of mtl_coherence.py else: word2id = lambda x: x tokenizer =", "re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag()", "(utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df) sent_insert_ix = random.randint(0, sent_len-1)", "create distinct train/validation/test files. they'll correspond to the created #", "== y: continue ix_removed = original[0:ix] + ([] if ix", "= list(set(segm_ixs.values())) segment_permutations = [] permutations = [list(segm_ixs.keys())] for _", "12, \"nn\": 13, \"bk\": 14, \"h\": 15, \"qy^d\": 16, \"o\":", "def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True): self.data_dir", "list(filter(lambda x: (x-speaker) % 2 != 0, segments)) #TODO: rename", "tok_seqs = [[w.lower() for w in utt] for utt in", "from collections import Counter, defaultdict import sys from nltk import", "i in p] p_u = str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) print(discarded) class", "'act_utt_name.txt') self.tokenizer = tokenizer self.word2id = word2id self.output_file = None", "prev_speaker = speaker_ixs[0] for j,speaker in enumerate(speaker_ixs): if speaker !=", "segment_perms = self.swda_half_perturb(amounts, speaker_ixs) elif self.task == 'ui': permuted_ixs, segment_perms", "type=int, default=20, help=\"random seed for initialization\") parser.add_argument('--word2id', action='store_true', help= \"convert", "self.data_dir = data_dir self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt') self.tokenizer = tokenizer", "d = defaultdict(lambda: 11) for (k, v) in mapping_dict.items(): d[k]", "== 'us': permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts) elif self.task == 'hup':", "import argparse import numpy as np import re import csv", "pu = [utterances[i] for i in p] p_u = str(pu)", "listOfKeys = list() for item in dictOfElements.items(): if item[1] ==", "dict({ \"sd\": 1, \"b\": 2, \"sv\": 3, \"aa\": 4, \"%-\":", "colums 'act' and 'utt' (utterance), with act being a number", "+ segm_perm[i_to:] permutation = 
[] for segm_ix in segm_perm: utt_ixs", "utt.text) sentence = self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag() if act", "else: csv_writer.writerow(perm) self.perturbation_statistics += len(permuted_ixs) if self.task == 'us': for", "ix) = self.draw_rand_sent() insert_ix = random.choice(segments) permutations.append((sentence, act, swda_name, ix,", "than the possible amount of permutations, only the uniquely possible", "random.randint(0, sent_len-1) permutations.append((utt, da, name, ix, sent_insert_ix)) return permutations def", "first one is the original, which was included s.t. won't", "segments[i_from] segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to] +", "sorted(getKeysByValue(segment_ixs, segm_ix)) permutation = permutation + utt_ixs if permutation not", "\".join([str(x) for x in acts]) u = str(utterances) insert_sent, insert_da,", "original: for y in range(length): if ix == y: continue", "csv from sklearn.model_selection import train_test_split from swda.swda import CorpusReader, Transcript,", "speaker_ixs, amounts): segment_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segment_ixs.values())) segment_permutations =", "if len(seqs) < 5: discarded += 1 continue tok_seqs =", "files of the corpus are located. \"\"\") parser.add_argument(\"--corpus\", required=True, type=str,", "list of DAs must be equal\" permutations = [list(range(len(sents)))] for", "= os.path.join(data_dir, 'act_utt_name.txt') self.tokenizer = tokenizer self.word2id = word2id self.output_file", "as np import re import csv from sklearn.model_selection import train_test_split", "a pandas dataframe with colums 'act' and 'utt' (utterance), with", "supposed to be a pandas dataframe with colums 'act' and", "located. \"\"\") parser.add_argument(\"--corpus\", required=True, type=str, help=\"\"\"the name of the corpus", "== 'up': permuted_ixs = permute(tok_seqs, acts, amounts) elif self.task ==", "p_u[insert_ix] = self.word2id(insert_sent) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in permuted_ixs:", "= os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task)) if not os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir, 'train'))", "self.setname == 'validation' or self.setname == 'test', \"wrong data dir", "required=True, type=str, default=\"up\", help=\"\"\"for which task the dataset should be", "+= len(permuted_ixs) if self.task == 'us': for p in permuted_ixs:", "= open(output_file, 'w') csv_writer = csv.writer(of, delimiter='|') for line_count, (dial,", "p_u = deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for", "[tok_seqs[i] for i in p] p_u = str(pu) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))", "os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir, 'test'))", "utterance petrurbation) ui (utterance insertion, nothing directly added!)\"\"\") args =", "= 0 segment_indices = dict() prev_speaker = speaker_ixs[0] for j,speaker", "= lambda x: x tokenizer = word_tokenize if args.corpus ==", "the dataset should be created. 
alternatives: up (utterance permutation) us", "p_a]) p_u = deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else:", "\"^g\": 46, \"qw^d\": 47, \"fa\": 48, \"ft\":49 }) d =", "in utt.caller: speaker_ixs.append(0) else: speaker_ixs.append(1) if self.task == 'up': permuted_ixs", "== \"+\": act = prev_act acts.append(self.da2num[act]) prev_act = act if", "== None: act = \"%\" if act == \"+\": act", "for utt in trans.utterances: sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence", "self.task == 'us': for p in permuted_ixs: (insert_sent, insert_da, name,", "random.randint(0, len(df['utt'])-1) return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix] def half_perturb(sents, sent_DAs,", "\"\", utt.text) sentence = self.tokenizer(sentence) for w in sentence: cnt[w]", "df = open(dial_file, 'r') af = open(act_file, 'r') of =", "'Switchboard' \"\"\") parser.add_argument('--seed', type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--amount',", "36, \"nd\": 37, \"t3\": 38, \"oo\": 39, \"co\": 40, \"cc\":", "permutation = permutation + utt_ixs if not permutation in permutations:", "26, \"qh\": 27, \"^h\": 28, \"ar\": 29, \"ng\": 30, \"nn^e\":", "= [] for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))", "return [] while True: permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist() new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix))", "i, word in enumerate(f): word2id_dict[word[:-1].lower()] = i word2id = lambda", "permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs) elif self.task == 'ui':", "sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence = self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act", "in range(amount): (utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df) sent_insert_ix =", "valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) def main(): parser", "!= prev_speaker: prev_speaker = speaker i += 1 segment_indices[j] =", "find input files\" assert os.path.isfile(self.act_utt_file), \"missing act_utt.txt in data_dir\" with", "45, \"^g\": 46, \"qw^d\": 47, \"fa\": 48, \"ft\":49 }) d", "return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix] def half_perturb(sents, sent_DAs, amount): assert", "4, \"%-\": 5, \"ba\": 6, \"qy\": 7, \"x\": 8, \"ny\":", "open(dial_file, 'r') af = open(act_file, 'r') of = open(output_file, 'w')", "def convert_dset(self, amounts): # create distinct train/validation/test files. 
they'll correspond", "self.perturbation_statistics += len(permuted_ixs) if self.task == 'us': for p in", "re import csv from sklearn.model_selection import train_test_split from swda.swda import", "csv_writer = csv.writer(f) if self.task == 'us': for perm in", "'hup': permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs) elif self.task ==", "i_from = random.randint(0, len(segments)-1) i_to = random.randint(0, len(segments)-2) segm_perm =", "from the constructor train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task)) val_output_file =", "data_dir, tokenizer, word2id, task='', seed=42): self.corpus = CorpusReader(data_dir) self.data_dir =", "one of the speakers speaker_to_perm = list(filter(lambda x: (x-speaker) %", "\"\"\" return a list of different! permuted sentences and their", "permutations or len(permutations) > math.factorial(len(speaker_ix))): permutations.append(new_sents) break return permutations[1:] def", "= self.swda_permute(utterances, amounts, speaker_ixs) elif self.task == 'us': permuted_ixs =", "word2id self.task = task self.utt_num = 0 for utt in", "= [acts[i] for i in p] p_a = \" \".join([str(a)", "[rem_elem] + segm_perm[i_to:] permutation = [] for segm_ix in segm_perm:", "if act == \"+\": act = prev_da _, swda_name =", "self.word2id(self.tokenizer(sentence)) act = utt.damsl_act_tag() if act == None: act =", "seq in seqs] tok_seqs = [[w.lower() for w in utt]", "be equal\" if amount == 0: return [] permutations =", "\"nn\": 13, \"bk\": 14, \"h\": 15, \"qy^d\": 16, \"o\": 17,", "len(sents) == len(sent_DAs), \"length of permuted sentences and list of", "break return permutations[1:] def utterance_insertions(length, amount): possible_permutations = [] original", "be the dir with the respective train/test/val-dataset files print(\"Creating {}", "type=int, default=42, help=\"random seed for initialization\") parser.add_argument('--amount', type=int, default=20, help=\"random", "insert_da, name, ix, insert_ix) = p a = \" \".join([str(a)", "\"could not find input files\" assert os.path.isfile(self.act_utt_file), \"missing act_utt.txt in", "if not os.path.exists(os.path.join(self.data_dir, 'train')): os.makedirs(os.path.join(self.data_dir, 'train')) if not os.path.exists(os.path.join(self.data_dir, 'validation')):", "u = str(utterances) pa = [acts[i] for i in p]", "args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements, valueToFind): listOfKeys = list() for", "literal_eval import pandas as pd from math import factorial import", "x: (x-speaker) % 2 != 0, segments)) #TODO: rename either", "import CorpusReader, Transcript, Utterance act2word = {1:\"inform\",2:\"question\", 3:\"directive\", 4:\"commissive\"} def", "name, ix, insert_ix) = p a = \" \".join([str(a) for", "[acts[i] for i in p] p_a = \" \".join([str(a) for", "for x in pa]) pu = [utterances[i] for i in", "segment_permutations.append(segm_perm) break return permutations, segment_permutations def swda_utterance_sampling(self, speaker_ixs, amount): segm_ixs", "range(amounts): while True: # actually: do ... 
while permutation not", "}) d = defaultdict(lambda: 11) for (k, v) in mapping_dict.items():", "with the respective train/test/val-dataset files print(\"Creating {} perturbations for task", "length-1 else original[ix+1:]) ix_removed.insert(y, ix) possible_permutations.append(deepcopy(ix_removed)) permutations = [] for", "trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u)) if i in", "\"ba\": 6, \"qy\": 7, \"x\": 8, \"ny\": 9, \"fc\": 10,", "= np.random.permutation(speaker_to_perm).tolist() new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix)) if speaker == 0 :", "permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts) swda_fname = os.path.split(trans.swda_filename)[1] shuffle_file =", "swda_fname) # [:-4] with open(shuffle_file, \"w\") as f: csv_writer =", "and list of DAs must be equal\" if amount ==", "for segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation =", "os from copy import deepcopy from ast import literal_eval import", "not os.path.exists(os.path.join(self.data_dir, 'validation')): os.makedirs(os.path.join(self.data_dir, 'validation')) if not os.path.exists(os.path.join(self.data_dir, 'test')): os.makedirs(os.path.join(self.data_dir,", "draw_rand_sent(self): r = random.randint(0, len(self.utt_da_pairs)-1) return self.utt_da_pairs[r] def create_vocab(self): print(\"Creating", "sentence = self.word2id(self.tokenizer(sentence)) utterances.append(sentence) act = utt.damsl_act_tag() if act ==", "permutation = [] for segm_ix in new_segments: utt_ixs = sorted(getKeysByValue(segm_ixs,", "parser.add_argument('--word2id', action='store_true', help= \"convert the words to ids\") parser.add_argument('--task', required=True,", "32, \"no\": 33, \"fp\": 34, \"qrr\": 35, \"arp\": 36, \"nd\":", "segm_ix in segm_perm: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation", "if i in self.test_ixs: testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) testfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in", "= list(filter(lambda x: (x-speaker) % 2 != 0, segments)) #TODO:", "self.word2id(insert_sent) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in permuted_ixs: a =", "a list of different! 
permuted sentences and their respective dialog", "def draw_rand_sent(act_utt_df, sent_len, amount): \"\"\" df is supposed to be", "acts]) u = str(utterances) insert_sent, insert_da, name, ix, insert_ix =", "= open(train_output_file, 'w') valfile = open(val_output_file, 'w') testfile = open(test_output_file,", "files print(\"Creating {} perturbations for task {}\".format(amounts, self.task)) dial_file =", "self.task == 'up': permuted_ixs = permute(tok_seqs, acts, amounts) elif self.task", "[acts[i] for i in p] p_a = \" \".join([str(x) for", "for i in p] p_a = \" \".join([str(a) for a", "new_segments: utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix)) permutation = permutation + utt_ixs", "seqs = seqs[:-1] if len(seqs) < 5: continue tok_seqs =", "sentence = self.word2id(self.tokenizer(sentence)) act = utt.damsl_act_tag() if act == None:", "in permutations i_from = random.randint(0, len(segments)-1) i_to = random.randint(0, len(segments)-2)", "(x-speaker) % 2 == 0, segments)) speaker_orig = list(filter(lambda x:", "or self.setname == 'validation' or self.setname == 'test', \"wrong data", "'us': for perm in permuted_ixs: (utt, da, name, ix, insert_ix)", "respective dialog acts \"\"\" \"\"\" if amount is greater than", "[self.tokenizer(seq) for seq in seqs] tok_seqs = [[w.lower() for w", "range(amount): i = random.randint(0, len(possible_permutations)-1) permutations.append(possible_permutations[i]) return permutations class DailyDialogConverter:", "= list(filter(lambda x: (x-speaker) % 2 == 0, segments)) speaker_orig", "main(): parser = argparse.ArgumentParser() parser.add_argument(\"--datadir\", required=True, type=str, help=\"\"\"The input directory", "possible_permutations = [] original = list(range(length)) for ix in original:", "= prev_da _, swda_name = os.path.split(utt.swda_filename) swda_name = swda_name[:-4] if", "corpus are located. 
\"\"\") parser.add_argument(\"--corpus\", required=True, type=str, help=\"\"\"the name of", "must be equal\" if amount == 0: return [] permutations", "dict() prev_speaker = speaker_ixs[0] for j,speaker in enumerate(speaker_ixs): if speaker", "= word2id self.output_file = None self.task = task self.ranking_dataset =", "self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task)) root_data_dir = os.path.split(self.data_dir)[0] shuffled_path = os.path.join(root_data_dir,", "p insert_da = self.da2num[insert_da] p_a = deepcopy(acts) p_a[insert_ix] = insert_da", "= random.randint(0, len(self.utt_da_pairs)-1) return self.utt_da_pairs[r] def create_vocab(self): print(\"Creating Vocab file", ", segment_permutations #the first one is the original, which was", "for w in sentence: cnt[w] += 1 itos_file = os.path.join(self.data_dir,", "'utt' (utterance), with act being a number from 1 to", "a in pa]) pu = [tok_seqs[i] for i in p]", "20, \"na\": 21, \"ny^e\": 22, \"ad\": 23, \"^2\": 24, \"b^m\":", "'test')) trainfile = open(train_output_file, 'w') valfile = open(val_output_file, 'w') testfile", "seed for initialization\") parser.add_argument('--amount', type=int, default=20, help=\"random seed for initialization\")", "(k, v) in mapping_dict.items(): d[k] = v return d if", "segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to] + [rem_elem]", "self.task == 'us': for p in permuted_ixs: a = \"", "in permuted_ixs: a = \" \".join([str(a) for a in acts])", "df['act'][ix], df['dialogue'][ix], df['ix'][ix] def half_perturb(sents, sent_DAs, amount): assert len(sents) ==", "discarded = 0 for line_count, (dial, act) in tqdm(enumerate(zip(df, af)),", "in p] p_u = str(pu) if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u))", "\"^h\": 28, \"ar\": 29, \"ng\": 30, \"nn^e\": 31, \"br\": 32,", "data_dir\" with open(self.act_utt_file, 'r') as f: act_utt_df = pd.read_csv(f, sep='|',", "from 1 to 4 and utt being a sentence \"\"\"", "not permutation in permutations: permutations.append(permutation) break return permutations[1:], segment_permutations def", "valfile = open(val_output_file, 'w') testfile = open(test_output_file, 'w') shuffled_path =", "i += 1 segment_indices[j] = i return segment_indices def swda_half_perturb(self,", "ix, insert_ix) = perm row = [name, ix,insert_ix] csv_writer.writerow(row) else:", "= [] permutations = [list(segm_ixs.keys())] for _ in range(amount): speaker", "permutations.append(permutation.tolist()) return permutations[1:] #the first one is the original, which", "1 self.trans_num = 0 for trans in self.corpus.iter_transcripts(): self.trans_num +=", "= [list(segm_ixs.keys())] for _ in range(amount): speaker = random.randint(0,1) #", "p] p_u = str(pu) if i in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,p_a,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",p_a,p_u,a,u))", "_) in cnt.most_common(25000): itosf.write(\"{}\\n\".format(word)) #getKeysByValue def swda_permute(self, sents, amount, speaker_ixs):", "args.task, args.seed) converter.create_vocab() converter.convert_dset(amounts=args.amount) def getKeysByValue(dictOfElements, valueToFind): listOfKeys = list()", "for a in p_a]) p_u = deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent)", "i = 0 for _ in range(amounts): while True: #", "number from 1 to 4 and utt being a sentence", "os.path.join(self.data_dir, \"itos.txt\") itosf = open(itos_file, \"w\") for (word, _) in", "len(self.utt_da_pairs)-1) return 
self.utt_da_pairs[r] def create_vocab(self): print(\"Creating Vocab file for Switchboard\")", "[list(segm_ixs.keys())] for _ in range(amount): speaker = random.randint(0,1) # choose", "== 'ui': permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts) swda_fname = os.path.split(trans.swda_filename)[1]", "self.draw_rand_sent() insert_ix = random.choice(segments) permutations.append((sentence, act, swda_name, ix, insert_ix)) return", "Utterance act2word = {1:\"inform\",2:\"question\", 3:\"directive\", 4:\"commissive\"} def permute(sents, sent_DAs, amount):", "for utt in self.corpus.iter_utterances(): sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\", \"\", utt.text) sentence", "corpus to use, currently either 'DailyDialog' or 'Switchboard' \"\"\") parser.add_argument('--seed',", "42, \"bd\": 43, \"aap\": 44, \"am\": 45, \"^g\": 46, \"qw^d\":", "utt_ixs if not permutation in permutations: permutations.append(permutation) break return permutations[1:],", "to the created # splits from the constructor train_output_file =", "segment_permutations = [] amount = min(amount, factorial(len(sents))-1) segm_ixs = self.speaker_segment_ixs(speaker_ixs)", "range(amount): while True: speaker = random.randint(0,1) # choose one of", "return a list of different! permuted sentences and their respective", "p_a = deepcopy(acts) p_a[insert_ix] = insert_da pa = \" \".join([str(a)", "permutations = [list(range(len(sents)))] segment_permutations = [] amount = min(amount, factorial(len(sents))-1)", "enumerate(f): word2id_dict[word[:-1].lower()] = i word2id = lambda x: [word2id_dict[y] for", "== permutations[0]) and ( not new_sents in permutations or len(permutations)", "i word2id = lambda x: [word2id_dict[y] for y in x]", "permutations.append((utt, da, name, ix, sent_insert_ix)) return permutations def draw_rand_sent_from_df(df): ix", "permutations.append(possible_permutations[i]) return permutations class DailyDialogConverter: def __init__(self, data_dir, tokenizer, word2id,", "segm_ix)) permutation = permutation + utt_ixs if permutation not in", "\"shuffled_{}\".format(self.task)) if not os.path.isdir(shuffled_path): os.mkdir(shuffled_path) assert os.path.isfile(dial_file) and os.path.isfile(act_file), \"could", "amounts): # data_dir is supposed to be the dir with", "acts = [int(act) for act in acts] for utt_i, (act,", "segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]", "+= 1 itos_file = os.path.join(self.data_dir, \"itos.txt\") itosf = open(itos_file, \"w\")", "if args.corpus == 'DailyDialog': converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)", "#the first one is the original, which was included s.t.", "\"%\": 11, \"qw\": 12, \"nn\": 13, \"bk\": 14, \"h\": 15,", "delimiter='|') for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118): seqs", "y in x] # don't convert words to ids (yet).", "in self.corpus.iter_utterances(): self.utt_num += 1 self.trans_num = 0 for trans", "def convert_dset(self, amounts): # data_dir is supposed to be the", "act if \"A\" in utt.caller: speaker_ixs.append(0) else: speaker_ixs.append(1) if self.task", "(utterance insertion, nothing directly added!)\"\"\") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed)", "choose one of the speakers speaker_to_perm = list(filter(lambda x: (x-speaker)", "of the corpus to use, currently either 'DailyDialog' or 'Switchboard'", "deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent) 
of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) else: for p in", "task='', ranking_dataset = True): self.data_dir = data_dir self.act_utt_file = os.path.join(data_dir,", "in self.train_ixs: trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) trainfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u)) if i in self.val_ixs: valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) valfile.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))", "in seqs] tok_seqs = [[w.lower() for w in utt] for", "directory where the files of the corpus are located. \"\"\")", "amounts) elif self.task == 'hup': permuted_ixs , segment_perms = self.swda_half_perturb(amounts,", "help=\"random seed for initialization\") parser.add_argument('--amount', type=int, default=20, help=\"random seed for", "7, \"x\": 8, \"ny\": 9, \"fc\": 10, \"%\": 11, \"qw\":", "\"nd\": 37, \"t3\": 38, \"oo\": 39, \"co\": 40, \"cc\": 41,", "= self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) segment_permutations = [] permutations =", "with act being a number from 1 to 4 and", "prev_act = \"%\" for utt in trans.utterances: sentence = re.sub(r\"([+/\\}\\[\\]]|\\{\\w)\",", "_ in range(amount): speaker = random.randint(0,1) # choose one of", "the seed is the same s.t. the splits will be", "act.split(' ') acts = acts[:-1] acts = [int(act) for act", "= self.swda_utterance_sampling(speaker_ixs, amounts) elif self.task == 'hup': permuted_ixs , segment_perms", "permutations = [] for i in range(amount): (sentence, act, swda_name,", "import literal_eval import pandas as pd from math import factorial", "in acts]) u = str(tok_seqs) p_a = deepcopy(acts) p_a[insert_ix] =", "where the files of the corpus are located. \"\"\") parser.add_argument(\"--corpus\",", "[] permutations = [list(segm_ixs.keys())] for _ in range(amount): speaker =", "return permutations def convert_dset(self, amounts): # create distinct train/validation/test files.", "utterances = [] acts = [] speaker_ixs = [] prev_act", "return permutations def draw_rand_sent_from_df(df): ix = random.randint(0, len(df['utt'])-1) return literal_eval(df['utt'][ix]),", "\"nn^e\": 31, \"br\": 32, \"no\": 33, \"fp\": 34, \"qrr\": 35,", "name\" def create_act_utt(self): dial_file = os.path.join(self.data_dir, \"dialogues_{}.txt\".format(self.setname)) act_file = os.path.join(self.data_dir,", "defaultdict(lambda: 11) for (k, v) in mapping_dict.items(): d[k] = v", "sklearn.model_selection import train_test_split from swda.swda import CorpusReader, Transcript, Utterance act2word", "= segm_perm[0:i_from] + segm_perm[i_from+1:] segm_perm = segm_perm[0:i_to] + [rem_elem] +", "= draw_rand_sent_from_df(act_utt_df) sent_insert_ix = random.randint(0, sent_len-1) permutations.append((utt, da, name, ix,", "len(seqs) < 5: continue tok_seqs = [self.tokenizer(seq) for seq in", "sents, amount, speaker_ixs): if amount == 0: return [] permutations", "speaker_ixs): segm_ixs = self.speaker_segment_ixs(speaker_ixs) segments = list(set(segm_ixs.values())) segment_permutations = []", "in p_a]) p_u = deepcopy(tok_seqs) p_u[insert_ix] = self.word2id(insert_sent) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"0\",a,u,pa,p_u)) of.write(\"{}|{}|{}|{}|{}\\n\".format(\"1\",pa,p_u,a,u))", "test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed) self.train_ixs, self.val_ixs, self.test_ixs =", "included s.t. won't be generated def speaker_segment_ixs(self, speaker_ixs): i =", "of different! 
permuted sentences and their respective dialog acts \"\"\"" ]
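Each row above appears to be a bag of overlapping, whitespace-tokenized word n-grams (roughly 10 tokens per window) taken from a single Python source file; the rows do not document the extraction procedure, so the following is only a minimal sketch under that assumption — the function name, tokenization, and window size are guesses, not part of the dataset.

# Sketch only: slide a fixed-size window over whitespace tokens of a source file.
# Window size and tokenization are inferred from the visible rows, not documented.
def word_ngrams(text, n=10):
    tokens = text.split()  # plain whitespace tokenization (assumption)
    return [" ".join(tokens[i:i + n]) for i in range(max(len(tokens) - n + 1, 0))]

# Example: produce windows of the same shape as the rows above (smaller n for brevity).
sample = "import pytest from janitor.utils import _clean_accounting_column"
for gram in word_ngrams(sample, n=5):
    print(gram)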
[ "= \"(1,000)\" assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str", "janitor.utils import _clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str = \"(1,000)\" assert", "\"(1,000)\" assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str =", "assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str = \"()\"", "== float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str = \"()\" assert _clean_accounting_column(test_str)", "_clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str = \"()\" assert", "def test_clean_accounting_column(): test_str = \"(1,000)\" assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils", "@pytest.mark.utils def test_clean_accounting_column(): test_str = \"(1,000)\" assert _clean_accounting_column(test_str) == float(-1000)", "@pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str = \"()\" assert _clean_accounting_column(test_str) == 0.00", "test_clean_accounting_column(): test_str = \"(1,000)\" assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def", "import pytest from janitor.utils import _clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str", "_clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str = \"(1,000)\" assert _clean_accounting_column(test_str) ==", "pytest from janitor.utils import _clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str =", "import _clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str = \"(1,000)\" assert _clean_accounting_column(test_str)", "from janitor.utils import _clean_accounting_column @pytest.mark.utils def test_clean_accounting_column(): test_str = \"(1,000)\"", "test_str = \"(1,000)\" assert _clean_accounting_column(test_str) == float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes():", "float(-1000) @pytest.mark.utils def test_clean_accounting_column_zeroes(): test_str = \"()\" assert _clean_accounting_column(test_str) ==" ]
[ "= \"http:\" + soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT JSON_1", "if anchors: link = anchors[0]['href'] title = g.find('h3').text item =", "{format(time.time() - start, '0.5f')}\") start = time.time() def main(argv): download(argv[1])", "Q_TYPE init_start = time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache)", "= [] for g in soup.find_all('div', class_='r'): anchors = g.find_all('a')", "anchors = g.find_all('a') if anchors: link = anchors[0]['href'] title =", "= query_text query = query.replace(' ', '+') if Q_TYPE <=", "writeJson(json, filePath): f = open(filePath, 'w') f.write(json) f.close() def printTime(text):", "\"link\": link } results.append(item) #print(link) global SEARCH_RESULT if link.find(WIKI_URL) !=", "Gecko/20100101 Firefox/65.0\" # mobile user-agent MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux; Android", "def downloadURL(URL): # desktop user-agent USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel", ": \"public,max-age=3600\"} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"no-cache\"}", "text: return text[0:text.find(\"감독\")] elif \"등장인물\" in text: return text[0:text.find(\"등장인물\")] elif", "' ', str(pWithoutTag), 0).strip() imgTag = \"http:\" + soup.find('a', {'class':'image'}).find('img')['src']", "text[0:text.find(\"감독\")] elif \"등장인물\" in text: return text[0:text.find(\"등장인물\")] elif \"누구야\" in", "if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag = \"위키피디아가 없네요. 링크를 열어보세요\"", "SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2: URL", "링크를 열어보세요\" else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200:", "+ \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\"", "!= -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link']", "\"link\" : f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\" : f\"{NO_RESULT}\", \"link\" :", "elif \"찾아줘\" in text: return text[0:text.find(\"찾아줘\")] elif \"언제야\" in text:", "elif \"누구야\" in text: Q_TYPE = 1 else: Q_TYPE =", "Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\" # mobile user-agent", "# desktop user-agent USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS", "text[0:text.find(\"어디\")] elif \"뭐야\" in text: return text[0:text.find(\"뭐야\")] else : return", "checkQType(text) query_text = preprocessor(text) ## 1st SEARCH query = query_text", "coding=utf-8 import sys, getopt import urllib import requests import requests_cache", "rv:65.0) Gecko/20100101 Firefox/65.0\" # mobile user-agent MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux;", "item = { \"title\": title, \"link\": link } results.append(item) #print(link)", "URL = f\"https://google.com/search?q={query} site:wikipedia.org\" else : URL = f\"https://google.com/search?q={query}\" print(URL)", "f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\" : f\"{NO_RESULT}\", \"link\"", "or \"뭐야\" in text: Q_TYPE = 2 elif \"누구야\" in", "= link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT:", "text: return text[0:text.find(\"누구야\")] elif \"알려줘\" in text: return text[0:text.find(\"알려줘\")] elif", "+ \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\"", "if \"감독\" in 
text: return text[0:text.find(\"감독\")] elif \"등장인물\" in text:", "in text: return text[0:text.find(\"어디\")] elif \"뭐야\" in text: return text[0:text.find(\"뭐야\")]", "GENERATE SEARCH RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\"", "writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT =", "text[0:text.find(\"뭐야\")] else : return text def checkQType(text): global Q_TYPE if", "\"찾아줘\" in text: return text[0:text.find(\"찾아줘\")] elif \"언제야\" in text: return", "in text: return text[0:text.find(\"알려줘\")] elif \"보여줘\" in text: return text[0:text.find(\"보여줘\")]", "Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\" headers = {\"user-agent\" : USER_AGENT} #headers", "else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION TYPE :", "f\"{NO_RESULT}\" ## 2nd SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE", "\"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT = \"no_result\" SEARCH_RESULT = {", "'0.5f')}\") start = time.time() def main(argv): download(argv[1]) if __name__ ==", "', '+') if Q_TYPE <= 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\"", "SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT", "!= NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break SEARCH_RESULT['FIRST']['title'] = results[0].get('title')", "if resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\") p =", "urllib import requests import requests_cache import re import time from", "#resp = s.get(URL) results = [{\"title\" : f\"{NO_RESULT}\", \"link\" :", "10.14; rv:65.0) Gecko/20100101 Firefox/65.0\" # mobile user-agent MOBILE_USER_AGENT = \"Mozilla/5.0", "printTime(\"1st Search Time\") pWithoutTag = f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\" ##", "like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\" headers = {\"user-agent\" : USER_AGENT}", "\"보여줘\" in text: return text[0:text.find(\"보여줘\")] elif \"찾아줘\" in text: return", "start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False: if True:", "headers = {\"user-agent\" : USER_AGENT} #headers = {\"user-agent\" : USER_AGENT,", "= FULL_JSON ## GENERATE SEARCH RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" +", "p = soup.find('p') pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag", "= f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st Search Time\") pWithoutTag = f\"{NO_RESULT}\"", "title SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link']", "f.write(json) f.close() def printTime(text): global start print(f\"[SEARCH] {text} : {format(time.time()", "SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) +", "## 2nd SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE >", "f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st Search Time\") pWithoutTag = f\"{NO_RESULT}\" imgTag", "global Q_TYPE if \"감독\" in text or \"어디서\" in text", "YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT = \"no_result\" SEARCH_RESULT = { \"WIKI\"", "\"title\": title, \"link\": link } results.append(item) #print(link) global SEARCH_RESULT if", "in text: return 
text[0:text.find(\"감독\")] elif \"등장인물\" in text: return text[0:text.find(\"등장인물\")]", "BERT_SEARCH_JSON def preprocessor(text): if \"감독\" in text: return text[0:text.find(\"감독\")] elif", "in soup.find_all('div', class_='r'): anchors = g.find_all('a') if anchors: link =", "INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1", ": {format(time.time() - init_start, '0.5f')}\") return Q_TYPE def writeJson(json, filePath):", "\"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\" : f\"{NO_RESULT}\", \"link\" :", "query = query_text query = query.replace(' ', '+') if Q_TYPE", "from requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import cacheExist, writeCache,", "return text[0:text.find(\"뭐야\")] else : return text def checkQType(text): global Q_TYPE", "NO_RESULT and Q_TYPE > 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL)", ": f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\" : f\"{NO_RESULT}\",", "link } results.append(item) #print(link) global SEARCH_RESULT if link.find(WIKI_URL) != -1", "= s.get(URL) results = [{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}]", "print(f\"[SEARCH] {text} : {format(time.time() - start, '0.5f')}\") start = time.time()", "1st SEARCH query = query_text query = query.replace(' ', '+')", "+ \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\"", "#s.headers.update(headers) resp = requests.get(URL, headers=headers) #resp = s.get(URL) results =", "preprocessor(text) ## 1st SEARCH query = query_text query = query.replace('", "# mobile user-agent MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux; Android 7.0; SM-G930V", "OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\" # mobile user-agent MOBILE_USER_AGENT", "= BeautifulSoup(resp.content, \"lxml\") results = [] for g in soup.find_all('div',", "re.sub('\"', '', str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\\n', ' ', str(pWithoutTag),", "NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link'] !=", "requests import requests_cache import re import time from bs4 import", "{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\"", "\"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" +", "= FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON)", "= [{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code", "Q_TYPE) WIKI_URL = \"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT = \"no_result\"", "f\"resp.status_code {resp.status_code}\" return results def download(text): global cache cache =", "USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0)", "pWithoutTag = f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\" ## 2nd SEARCH if", "NO_RESULT: pWithoutTag = \"위키피디아가 없네요. 
링크를 열어보세요\" else: resp =", "BERT INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON =", "SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag = \"위키피디아가 없네요. 링크를 열어보세요\" else:", "\"YOUTUBE\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"test_input.json\" :", "= re.sub('\\n', ' ', str(pWithoutTag), 0).strip() imgTag = \"http:\" +", "downloadURL(URL) printTime(\"1st Search Time\") pWithoutTag = f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\"", "Search Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE SEARCH RESULT FULL_JSON", "getDownloadCachePath from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if \"감독\"", "text or \"뭐야\" in text: Q_TYPE = 2 elif \"누구야\"", "in text: return text[0:text.find(\"등장인물\")] elif \"누구야\" in text: return text[0:text.find(\"누구야\")]", "imgTag = \"http:\" + soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT", "f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\" : f\"{NO_RESULT}\", \"link\"", "= open(filePath, 'w') f.write(json) f.close() def printTime(text): global start print(f\"[SEARCH]", "\"cache-contorl\" : \"public,max-age=3600\"} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" :", "time : {format(time.time() - init_start, '0.5f')}\") return Q_TYPE def writeJson(json,", "= CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}\")", "cacheExist, writeCache, readCache, getDownloadCachePath from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def", "text[0:text.find(\"등장인물\")] elif \"누구야\" in text: return text[0:text.find(\"누구야\")] elif \"알려줘\" in", "time from bs4 import BeautifulSoup from requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\")", ": {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\",", "time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False: if True: checkQType(text) query_text", "BeautifulSoup(resp.content, \"lxml\") results = [] for g in soup.find_all('div', class_='r'):", "str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\\n', ' ', str(pWithoutTag), 0).strip() imgTag", "else : URL = f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st Search Time\")", "= re.sub('\"', '', str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\\n', ' ',", "\"no-cache\"} #s = Session() #s.headers.update(headers) resp = requests.get(URL, headers=headers) #resp", "JSON_1 + pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search Time\")", ": f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code == 200: soup = BeautifulSoup(resp.content,", "text: Q_TYPE = 2 elif \"누구야\" in text: Q_TYPE =", "JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1 + pWithoutTag + JSON_2", "= \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1 + pWithoutTag + JSON_2 writeJson(FULL_JSON,", "text: return text[0:text.find(\"언제\")] elif \"어디\" in text: return text[0:text.find(\"어디\")] elif", "\"link\" : f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\"", "SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: 
break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link')", "+ SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE)", "== NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL)", "return text[0:text.find(\"알려줘\")] elif \"보여줘\" in text: return text[0:text.find(\"보여줘\")] elif \"찾아줘\"", "= Session() #s.headers.update(headers) resp = requests.get(URL, headers=headers) #resp = s.get(URL)", "Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile", "Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE SEARCH RESULT FULL_JSON =", "= g.find_all('a') if anchors: link = anchors[0]['href'] title = g.find('h3').text", "results def download(text): global cache cache = getDownloadCachePath(text) global start,", "NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link']", "Time\") pWithoutTag = f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\" ## 2nd SEARCH", "in text: return text[0:text.find(\"뭐야\")] else : return text def checkQType(text):", "= query.replace(' ', '+') if Q_TYPE <= 2: URL =", "## GENERATE BERT INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\"", "3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION TYPE : \", Q_TYPE) WIKI_URL", "= JSON_1 + pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search", "f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"test_input.json\"", "start print(f\"[SEARCH] {text} : {format(time.time() - start, '0.5f')}\") start =", "return Q_TYPE def writeJson(json, filePath): f = open(filePath, 'w') f.write(json)", "downloadURL(URL): # desktop user-agent USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac", "{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\" :", "(KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\" headers = {\"user-agent\" :", "\"lxml\") results = [] for g in soup.find_all('div', class_='r'): anchors", "\"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1 + pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON)", "= time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False: if True: checkQType(text)", "SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] +", "BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time : {format(time.time() -", "BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT = readCache(cache)", "writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON)", "printTime(text): global start print(f\"[SEARCH] {text} : {format(time.time() - start, '0.5f')}\")", "} def downloadURL(URL): # desktop user-agent 
USER_AGENT = \"Mozilla/5.0 (Macintosh;", "\"위키피디아가 없네요. 링크를 열어보세요\" else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code", "import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath", "USER_AGENT} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"} #headers", "== 200: soup = BeautifulSoup(resp.content, \"lxml\") results = [] for", "requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import cacheExist, writeCache, readCache,", "title = g.find('h3').text item = { \"title\": title, \"link\": link", "writeCache, readCache, getDownloadCachePath from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text):", "2nd SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2:", "+ str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) + \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON)", "SEARCH RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\" +", "open(filePath, 'w') f.write(json) f.close() def printTime(text): global start print(f\"[SEARCH] {text}", "\"lxml\") p = soup.find('p') pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip()", "f.close() def printTime(text): global start print(f\"[SEARCH] {text} : {format(time.time() -", "\"누구야\" in text: Q_TYPE = 1 else: Q_TYPE = 3", "= re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag = re.sub('\"', '', str(pWithoutTag),", "NO_RESULT = \"no_result\" SEARCH_RESULT = { \"WIKI\" : {\"title\" :", "in text: return text[0:text.find(\"누구야\")] elif \"알려줘\" in text: return text[0:text.find(\"알려줘\")]", "!= NO_RESULT: break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else:", "resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\") results = []", "def printTime(text): global start print(f\"[SEARCH] {text} : {format(time.time() - start,", "writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE", "== NO_RESULT: pWithoutTag = \"위키피디아가 없네요. 
링크를 열어보세요\" else: resp", "imgTag = f\"{NO_RESULT}\" ## 2nd SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT", "Q_TYPE if \"감독\" in text or \"어디서\" in text or", "anchors[0]['href'] title = g.find('h3').text item = { \"title\": title, \"link\":", "\"WIKI\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"FIRST\" :", "writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time : {format(time.time()", "SEARCH_RESULT) else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE", "start = time.time() def main(argv): download(argv[1]) if __name__ == \"__main__\":", "= {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"} #headers = {\"user-agent\"", "JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON ##", "{\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"} #headers = {\"user-agent\" :", "{text} : {format(time.time() - start, '0.5f')}\") start = time.time() def", "= BeautifulSoup(resp.content, \"lxml\") p = soup.find('p') pWithoutTag = re.sub('<.+?>', '',", "link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title", "desktop user-agent USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X", "= \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101", "def preprocessor(text): if \"감독\" in text: return text[0:text.find(\"감독\")] elif \"등장인물\"", "= f\"resp.status_code {resp.status_code}\" return results def download(text): global cache cache", "GENERATE BERT INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON", "BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if \"감독\" in text: return text[0:text.find(\"감독\")]", "def checkQType(text): global Q_TYPE if \"감독\" in text or \"어디서\"", "pWithoutTag = re.sub('\"', '', str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\\n', '", "0).strip() pWithoutTag = re.sub('\"', '', str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\\n',", "= 2 elif \"누구야\" in text: Q_TYPE = 1 else:", "= 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION TYPE : \", Q_TYPE)", "requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\") p", "readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total", "resp = requests.get(URL, headers=headers) #resp = s.get(URL) results = [{\"title\"", "= \"no_result\" SEARCH_RESULT = { \"WIKI\" : {\"title\" : f\"{NO_RESULT}\",", "pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag = re.sub('\"', '',", "2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT:", "return text[0:text.find(\"등장인물\")] elif \"누구야\" in text: return text[0:text.find(\"누구야\")] elif \"알려줘\"", "init_start = time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) ==", "g in soup.find_all('div', 
class_='r'): anchors = g.find_all('a') if anchors: link", "elif \"언제야\" in text: return text[0:text.find(\"언제\")] elif \"어디\" in text:", "CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE']", "return text[0:text.find(\"어디\")] elif \"뭐야\" in text: return text[0:text.find(\"뭐야\")] else :", "\"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" +", ": \", Q_TYPE) WIKI_URL = \"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT", "{format(time.time() - init_start, '0.5f')}\") return Q_TYPE def writeJson(json, filePath): f", "f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\",", "0).strip() pWithoutTag = re.sub('\\n', ' ', str(pWithoutTag), 0).strip() imgTag =", "URL = f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st Search Time\") pWithoutTag =", "global cache cache = getDownloadCachePath(text) global start, Q_TYPE init_start =", "title, \"link\": link } results.append(item) #print(link) global SEARCH_RESULT if link.find(WIKI_URL)", "+ \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\"", "\"no_result\" SEARCH_RESULT = { \"WIKI\" : {\"title\" : f\"{NO_RESULT}\", \"link\"", "Search Time\") pWithoutTag = f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\" ## 2nd", "Q_TYPE = 2 elif \"누구야\" in text: Q_TYPE = 1", "= { \"title\": title, \"link\": link } results.append(item) #print(link) global", "Q_TYPE <= 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" else : URL", "elif \"누구야\" in text: return text[0:text.find(\"누구야\")] elif \"알려줘\" in text:", "SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2: URL = f\"https://google.com/search?q={query}", "BeautifulSoup(resp.content, \"lxml\") p = soup.find('p') pWithoutTag = re.sub('<.+?>', '', str(p),", "= link elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT:", "str(imgTag) + \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT)", "#s = Session() #s.headers.update(headers) resp = requests.get(URL, headers=headers) #resp =", "== NO_RESULT and Q_TYPE > 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\"", "= preprocessor(text) ## 1st SEARCH query = query_text query =", "in text: Q_TYPE = 1 else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE']", "-1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] =", "query = query.replace(' ', '+') if Q_TYPE <= 2: URL", "else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\" return results def download(text): global", "Total time : {format(time.time() - init_start, '0.5f')}\") return Q_TYPE def", "[] for g in soup.find_all('div', class_='r'): anchors = g.find_all('a') if", "<= 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" else : URL =", "- start, '0.5f')}\") start = time.time() def main(argv): download(argv[1]) if", "> 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] ==", "NO_RESULT: 
break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title']", "text: return text[0:text.find(\"어디\")] elif \"뭐야\" in text: return text[0:text.find(\"뭐야\")] else", ": f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"},", "re.sub('\\n', ' ', str(pWithoutTag), 0).strip() imgTag = \"http:\" + soup.find('a',", "sys, getopt import urllib import requests import requests_cache import re", "#headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"no-cache\"} #s =", "import requests_cache import re import time from bs4 import BeautifulSoup", "\"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) + \"\\\"}\" writeJson(FULL_JSON,", "and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] =", "import requests import requests_cache import re import time from bs4", "in text: return text[0:text.find(\"찾아줘\")] elif \"언제야\" in text: return text[0:text.find(\"언제\")]", "+ soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\"", "if True: checkQType(text) query_text = preprocessor(text) ## 1st SEARCH query", "URL = f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag", "str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) + \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json']", "= requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\")", "return text[0:text.find(\"감독\")] elif \"등장인물\" in text: return text[0:text.find(\"등장인물\")] elif \"누구야\"", "\", Q_TYPE) WIKI_URL = \"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT =", "time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False: if", "SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code", "= 1 else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION", "\"등장인물\" in text: return text[0:text.find(\"등장인물\")] elif \"누구야\" in text: return", "== NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link']", "str(pWithoutTag), 0).strip() imgTag = \"http:\" + soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE", "results.append(item) #print(link) global SEARCH_RESULT if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link']", ": f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\" :", "SEARCH query = query_text query = query.replace(' ', '+') if", "= time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False:", "results = [{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}] print(resp.status_code) if", "return text[0:text.find(\"보여줘\")] elif \"찾아줘\" in text: return text[0:text.find(\"찾아줘\")] elif \"언제야\"", "= 
\"youtube.com/channel\" NO_RESULT = \"no_result\" SEARCH_RESULT = { \"WIKI\" :", "## GENERATE SEARCH RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] +", "in text or \"뭐야\" in text: Q_TYPE = 2 elif", "SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break SEARCH_RESULT['FIRST']['title'] =", "text: return text[0:text.find(\"보여줘\")] elif \"찾아줘\" in text: return text[0:text.find(\"찾아줘\")] elif", "or \"어디서\" in text or \"언제\" in text or \"뭐야\"", "utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath from utils.path import BERT_INPUT_JSON,", "print(resp.status_code) if resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\") results", "in text or \"언제\" in text or \"뭐야\" in text:", "= title SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and", "return text[0:text.find(\"찾아줘\")] elif \"언제야\" in text: return text[0:text.find(\"언제\")] elif \"어디\"", "= {\"user-agent\" : USER_AGENT} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\"", "+ pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json']", "\"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\" } def downloadURL(URL): #", "pWithoutTag = re.sub('\\n', ' ', str(pWithoutTag), 0).strip() imgTag = \"http:\"", "print(URL) downloadURL(URL) printTime(\"1st Search Time\") pWithoutTag = f\"{NO_RESULT}\" imgTag =", "'w') f.write(json) f.close() def printTime(text): global start print(f\"[SEARCH] {text} :", "= Q_TYPE print(\"QUESTION TYPE : \", Q_TYPE) WIKI_URL = \"wikipedia.org\"", "+ JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON", "\"http:\" + soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT JSON_1 =", "f\"https://google.com/search?q={query} site:wikipedia.org\" else : URL = f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st", "SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] !=", "soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2", "BeautifulSoup from requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import cacheExist,", "+ SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title']", "SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) +", "SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'],", "= \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\"", "checkQType(text): global Q_TYPE if \"감독\" in text or \"어디서\" in", "start, '0.5f')}\") start = time.time() def main(argv): download(argv[1]) if __name__", "Q_TYPE print(\"QUESTION TYPE : \", Q_TYPE) 
WIKI_URL = \"wikipedia.org\" YOUTUBE_URL", "200: soup = BeautifulSoup(resp.content, \"lxml\") p = soup.find('p') pWithoutTag =", "import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if \"감독\" in text: return", "NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL) !=", "SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] = link if", "user-agent USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14;", "SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\" return results def download(text): global cache", "\"cache-contorl\" : \"no-cache\"} #s = Session() #s.headers.update(headers) resp = requests.get(URL,", "downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag = \"위키피디아가 없네요. 링크를", "link elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title']", "#headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"} #headers =", "f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\" } def downloadURL(URL):", "in text: return text[0:text.find(\"언제\")] elif \"어디\" in text: return text[0:text.find(\"어디\")]", "= results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\"", "= \"위키피디아가 없네요. 링크를 열어보세요\" else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if", "- init_start, '0.5f')}\") return Q_TYPE def writeJson(json, filePath): f =", "= title SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL) != -1 and", "download(text): global cache cache = getDownloadCachePath(text) global start, Q_TYPE init_start", "if Q_TYPE <= 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" else :", "query.replace(' ', '+') if Q_TYPE <= 2: URL = f\"https://google.com/search?q={query}", "re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag = re.sub('\"', '', str(pWithoutTag), 0).strip()", "\"감독\" in text or \"어디서\" in text or \"언제\" in", "(Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125", "import cacheExist, writeCache, readCache, getDownloadCachePath from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON", "cache = getDownloadCachePath(text) global start, Q_TYPE init_start = time.time() start", "text: return text[0:text.find(\"알려줘\")] elif \"보여줘\" in text: return text[0:text.find(\"보여줘\")] elif", "SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION TYPE : \", Q_TYPE) WIKI_URL =", "\"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\" }", "= readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH]", "-1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] =", "!= -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link']", "from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if \"감독\" in", "{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\" :", "if resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\") results =", "} 
results.append(item) #print(link) global SEARCH_RESULT if link.find(WIKI_URL) != -1 and", "+ \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT) else:", "and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title SEARCH_RESULT['YOUTUBE']['link'] = link", "user-agent MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36", ": f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\" :", "soup = BeautifulSoup(resp.content, \"lxml\") results = [] for g in", "filePath): f = open(filePath, 'w') f.write(json) f.close() def printTime(text): global", "resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200: soup = BeautifulSoup(resp.content,", "f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag = \"위키피디아가", "SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] +", "results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\" return results def download(text):", "return text def checkQType(text): global Q_TYPE if \"감독\" in text", "False: if True: checkQType(text) query_text = preprocessor(text) ## 1st SEARCH", "readCache, getDownloadCachePath from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if", "\"어디\" in text: return text[0:text.find(\"어디\")] elif \"뭐야\" in text: return", "Q_TYPE = 1 else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE", "pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json'] =", ": URL = f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st Search Time\") pWithoutTag", "{ \"WIKI\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"FIRST\"", ": USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"} #headers = {\"user-agent\" : USER_AGENT,", ": f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\" } def", "elif \"어디\" in text: return text[0:text.find(\"어디\")] elif \"뭐야\" in text:", "= time.time() def main(argv): download(argv[1]) if __name__ == \"__main__\": main(sys.argv)", "\"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1 + pWithoutTag +", "getDownloadCachePath(text) global start, Q_TYPE init_start = time.time() start = time.time()", ": {format(time.time() - start, '0.5f')}\") start = time.time() def main(argv):", ": {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\"", "열어보세요\" else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200: soup", "f\"{NO_RESULT}\"}, \"test_input.json\" : f\"{NO_RESULT}\", \"search_result.json\" : f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\"", "requests.get(URL, headers=headers) #resp = s.get(URL) results = [{\"title\" : f\"{NO_RESULT}\",", "Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\" headers =", "+ SEARCH_RESULT['FIRST']['title'] + 
\"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title']", "bs4 import BeautifulSoup from requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils", "\"link\" : f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code == 200: soup =", "class_='r'): anchors = g.find_all('a') if anchors: link = anchors[0]['href'] title", "cacheExist(cache) == False: if True: checkQType(text) query_text = preprocessor(text) ##", "return results def download(text): global cache cache = getDownloadCachePath(text) global", "= anchors[0]['href'] title = g.find('h3').text item = { \"title\": title,", "SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\" headers", "mobile user-agent MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M)", "resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\") p = soup.find('p')", "getopt import urllib import requests import requests_cache import re import", "f\"{NO_RESULT}\" } def downloadURL(URL): # desktop user-agent USER_AGENT = \"Mozilla/5.0", "200: soup = BeautifulSoup(resp.content, \"lxml\") results = [] for g", "elif \"알려줘\" in text: return text[0:text.find(\"알려줘\")] elif \"보여줘\" in text:", "in text: Q_TYPE = 2 elif \"누구야\" in text: Q_TYPE", "= g.find('h3').text item = { \"title\": title, \"link\": link }", "global start, Q_TYPE init_start = time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache')", "SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] = link elif", "f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\" } def downloadURL(URL): # desktop user-agent", ": {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\"", "query_text = preprocessor(text) ## 1st SEARCH query = query_text query", "writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time", "utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON def preprocessor(text): if \"감독\" in text:", "g.find_all('a') if anchors: link = anchors[0]['href'] title = g.find('h3').text item", "+ \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) + \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] =", ": \"no-cache\"} #s = Session() #s.headers.update(headers) resp = requests.get(URL, headers=headers)", "USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\"", "s.get(URL) results = [{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}] print(resp.status_code)", "text[0:text.find(\"찾아줘\")] elif \"언제야\" in text: return text[0:text.find(\"언제\")] elif \"어디\" in", "elif \"등장인물\" in text: return text[0:text.find(\"등장인물\")] elif \"누구야\" in text:", "return text[0:text.find(\"언제\")] elif \"어디\" in text: return text[0:text.find(\"어디\")] elif \"뭐야\"", "', str(pWithoutTag), 0).strip() imgTag = \"http:\" + soup.find('a', {'class':'image'}).find('img')['src'] ##", "#if cacheExist(cache) == False: if True: checkQType(text) query_text = preprocessor(text)", "= \"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT = \"no_result\" SEARCH_RESULT =", "in text: return text[0:text.find(\"보여줘\")] elif \"찾아줘\" in text: return text[0:text.find(\"찾아줘\")]", "\"감독\" in text: 
return text[0:text.find(\"감독\")] elif \"등장인물\" in text: return", "elif \"뭐야\" in text: return text[0:text.find(\"뭐야\")] else : return text", "SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] +", "for g in soup.find_all('div', class_='r'): anchors = g.find_all('a') if anchors:", "BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time :", "import re import time from bs4 import BeautifulSoup from requests", "+ SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag)", "if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] =", "\"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" +", "else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON) Q_TYPE =", "= { \"WIKI\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"},", "== False: if True: checkQType(text) query_text = preprocessor(text) ## 1st", "preprocessor(text): if \"감독\" in text: return text[0:text.find(\"감독\")] elif \"등장인물\" in", "SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE SEARCH RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\"", "1 else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION TYPE", "and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] = link", "Session() #s.headers.update(headers) resp = requests.get(URL, headers=headers) #resp = s.get(URL) results", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\" headers = {\"user-agent\"", "FULL_JSON = JSON_1 + pWithoutTag + JSON_2 writeJson(FULL_JSON, BERT_INPUT_JSON) printTime(\"2nd", "JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1 +", "pWithoutTag = \"위키피디아가 없네요. 
링크를 열어보세요\" else: resp = requests.get(SEARCH_RESULT['WIKI']['link'])", "text[0:text.find(\"알려줘\")] elif \"보여줘\" in text: return text[0:text.find(\"보여줘\")] elif \"찾아줘\" in", "{ \"title\": title, \"link\": link } results.append(item) #print(link) global SEARCH_RESULT", "or \"언제\" in text or \"뭐야\" in text: Q_TYPE =", "= soup.find('p') pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag =", "+ SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['WIKI']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link']", "text[0:text.find(\"보여줘\")] elif \"찾아줘\" in text: return text[0:text.find(\"찾아줘\")] elif \"언제야\" in", "\"Q_TYPE\" : f\"{NO_RESULT}\" } def downloadURL(URL): # desktop user-agent USER_AGENT", "(Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\" #", "= f\"{NO_RESULT}\" ## 2nd SEARCH if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and", "import time from bs4 import BeautifulSoup from requests import Session", "headers=headers) #resp = s.get(URL) results = [{\"title\" : f\"{NO_RESULT}\", \"link\"", "\"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT", "printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE SEARCH RESULT", "= f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag =", "query_text query = query.replace(' ', '+') if Q_TYPE <= 2:", "\"public,max-age=3600\"} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"no-cache\"} #s", "f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\" ## 2nd SEARCH if SEARCH_RESULT['WIKI']['title'] ==", "= \"Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like", "\"누구야\" in text: return text[0:text.find(\"누구야\")] elif \"알려줘\" in text: return", "requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if cacheExist(cache) == False: if True: checkQType(text) query_text =", "[{\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code ==", "text[0:text.find(\"언제\")] elif \"어디\" in text: return text[0:text.find(\"어디\")] elif \"뭐야\" in", "global SEARCH_RESULT if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT:", "break SEARCH_RESULT['FIRST']['title'] = results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] =", "\"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) + \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON", "2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" else : URL = f\"https://google.com/search?q={query}\"", "'', str(p), 0).strip() pWithoutTag = re.sub('\"', '', str(pWithoutTag), 0).strip() pWithoutTag", "Chrome/59.0.3071.125 Mobile Safari/537.36\" headers = {\"user-agent\" : USER_AGENT} #headers =", ": f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\" : f\"{NO_RESULT}\",", "0).strip() imgTag = \"http:\" + soup.find('a', {'class':'image'}).find('img')['src'] ## GENERATE BERT", "import BeautifulSoup from requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import", "in text or \"어디서\" in text or \"언제\" in text", "## 1st SEARCH query = query_text query = query.replace(' ',", "Q_TYPE = CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time : 
{format(time.time() - init_start,", "\"뭐야\" in text: Q_TYPE = 2 elif \"누구야\" in text:", "FULL_JSON ## GENERATE SEARCH RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title']", "WIKI_URL = \"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\" NO_RESULT = \"no_result\" SEARCH_RESULT", "def download(text): global cache cache = getDownloadCachePath(text) global start, Q_TYPE", "\"youtube.com/channel\" NO_RESULT = \"no_result\" SEARCH_RESULT = { \"WIKI\" : {\"title\"", "BERT_INPUT_JSON) printTime(\"2nd Search Time\") SEARCH_RESULT['test_input.json'] = FULL_JSON ## GENERATE SEARCH", "SEARCH_RESULT['WIKI']['title'] = title SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL) != -1", "soup = BeautifulSoup(resp.content, \"lxml\") p = soup.find('p') pWithoutTag = re.sub('<.+?>',", "\"언제\" in text or \"뭐야\" in text: Q_TYPE = 2", "elif \"보여줘\" in text: return text[0:text.find(\"보여줘\")] elif \"찾아줘\" in text:", "cache cache = getDownloadCachePath(text) global start, Q_TYPE init_start = time.time()", ": USER_AGENT} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"public,max-age=3600\"}", "def writeJson(json, filePath): f = open(filePath, 'w') f.write(json) f.close() def", "if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2: URL =", "'', str(pWithoutTag), 0).strip() pWithoutTag = re.sub('\\n', ' ', str(pWithoutTag), 0).strip()", "text or \"어디서\" in text or \"언제\" in text or", "str(p), 0).strip() pWithoutTag = re.sub('\"', '', str(pWithoutTag), 0).strip() pWithoutTag =", "text: return text[0:text.find(\"등장인물\")] elif \"누구야\" in text: return text[0:text.find(\"누구야\")] elif", "init_start, '0.5f')}\") return Q_TYPE def writeJson(json, filePath): f = open(filePath,", "from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath from utils.path import", ": f\"{NO_RESULT}\" } def downloadURL(URL): # desktop user-agent USER_AGENT =", ": f\"{NO_RESULT}\", \"Q_TYPE\" : f\"{NO_RESULT}\" } def downloadURL(URL): # desktop", "else : return text def checkQType(text): global Q_TYPE if \"감독\"", "\"어디서\" in text or \"언제\" in text or \"뭐야\" in", "link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break", "text or \"언제\" in text or \"뭐야\" in text: Q_TYPE", "\"Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko)", "link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] = title", "{'class':'image'}).find('img')['src'] ## GENERATE BERT INPUT JSON_1 = \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 =", "import urllib import requests import requests_cache import re import time", "{resp.status_code}\" return results def download(text): global cache cache = getDownloadCachePath(text)", "if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT: break SEARCH_RESULT['FIRST']['title']", "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\"", "RESULT FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link']", "requests_cache import re import time from bs4 import BeautifulSoup from", "Safari/537.36\" headers = {\"user-agent\" : USER_AGENT} 
#headers = {\"user-agent\" :", "elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT: SEARCH_RESULT['YOUTUBE']['title'] =", "print(f\"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}\") return Q_TYPE", "text: return text[0:text.find(\"뭐야\")] else : return text def checkQType(text): global", "text: Q_TYPE = 1 else: Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] =", "{\"user-agent\" : USER_AGENT} #headers = {\"user-agent\" : USER_AGENT, \"cache-contorl\" :", "Q_TYPE > 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title']", "SEARCH_RESULT if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT: SEARCH_RESULT['WIKI']['title']", "'0.5f')}\") return Q_TYPE def writeJson(json, filePath): f = open(filePath, 'w')", "print(\"QUESTION TYPE : \", Q_TYPE) WIKI_URL = \"wikipedia.org\" YOUTUBE_URL =", "없네요. 링크를 열어보세요\" else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code ==", "\"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" +", "site:wikipedia.org\" else : URL = f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL) printTime(\"1st Search", ": USER_AGENT, \"cache-contorl\" : \"no-cache\"} #s = Session() #s.headers.update(headers) resp", "7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36\"", "# coding=utf-8 import sys, getopt import urllib import requests import", "soup.find_all('div', class_='r'): anchors = g.find_all('a') if anchors: link = anchors[0]['href']", "== 200: soup = BeautifulSoup(resp.content, \"lxml\") p = soup.find('p') pWithoutTag", "= f\"{NO_RESULT}\" imgTag = f\"{NO_RESULT}\" ## 2nd SEARCH if SEARCH_RESULT['WIKI']['title']", "text def checkQType(text): global Q_TYPE if \"감독\" in text or", "= getDownloadCachePath(text) global start, Q_TYPE init_start = time.time() start =", "\"FIRST\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\" :", "results = [] for g in soup.find_all('div', class_='r'): anchors =", ": f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code == 200:", "f\"{NO_RESULT}\"}, \"FIRST\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}, \"YOUTUBE\"", "= results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\" return results def", "Firefox/65.0\" # mobile user-agent MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux; Android 7.0;", "+ str(imgTag) + \"\\\"}\" writeJson(FULL_JSON, BERT_SEARCH_JSON) SEARCH_RESULT['search_result.json'] = FULL_JSON writeCache(cache,", "True: checkQType(text) query_text = preprocessor(text) ## 1st SEARCH query =", "from bs4 import BeautifulSoup from requests import Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from", "f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code == 200: soup = BeautifulSoup(resp.content, \"lxml\")", "+ SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link']", "X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\" # mobile user-agent MOBILE_USER_AGENT =", "\"뭐야\" in text: return text[0:text.find(\"뭐야\")] else : return text def", "= {\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"no-cache\"} #s = Session()", "return text[0:text.find(\"누구야\")] elif \"알려줘\" in text: 
return text[0:text.find(\"알려줘\")] elif \"보여줘\"", "text: return text[0:text.find(\"찾아줘\")] elif \"언제야\" in text: return text[0:text.find(\"언제\")] elif", "CACHE_RESULT['Q_TYPE'] print(f\"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}\") return", "2 elif \"누구야\" in text: Q_TYPE = 1 else: Q_TYPE", "global start print(f\"[SEARCH] {text} : {format(time.time() - start, '0.5f')}\") start", "{\"user-agent\" : USER_AGENT, \"cache-contorl\" : \"no-cache\"} #s = Session() #s.headers.update(headers)", "FULL_JSON writeCache(cache, SEARCH_RESULT) else: CACHE_RESULT = readCache(cache) writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON) writeJson(CACHE_RESULT['search_result.json'],", "FULL_JSON = \"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] +", "= \"{\\\"version\\\":\\\"mytest_dev\\\",\\\"data\\\":[{\\\"paragraphs\\\":[{\\\"qas\\\":[{\\\"answers\\\":[{\\\"text\\\":\\\"테스트\\\",\\\"answer_start\\\":0}],\\\"id\\\":\\\"1-1\\\",\\\"question\\\":\\\"테스트\\\"}],\\\"context\\\":\\\"\" JSON_2 = \"\\\"}],\\\"title\\\":\\\"테스트\\\"}]}\" FULL_JSON = JSON_1 + pWithoutTag", "MOBILE_USER_AGENT = \"Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML,", "Q_TYPE def writeJson(json, filePath): f = open(filePath, 'w') f.write(json) f.close()", "SEARCH_RESULT = { \"WIKI\" : {\"title\" : f\"{NO_RESULT}\", \"link\" :", "#print(link) global SEARCH_RESULT if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] ==", ": return text def checkQType(text): global Q_TYPE if \"감독\" in", "\"{\\\"google\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['FIRST']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['FIRST']['link'] + \"\\\"}],\\\"wiki\\\":[{\\\"title\\\":\\\"\" +", "Q_TYPE = 3 SEARCH_RESULT['Q_TYPE'] = Q_TYPE print(\"QUESTION TYPE : \",", "anchors: link = anchors[0]['href'] title = g.find('h3').text item = {", "if \"감독\" in text or \"어디서\" in text or \"언제\"", "= f\"https://google.com/search?q={query} site:wikipedia.org\" else : URL = f\"https://google.com/search?q={query}\" print(URL) downloadURL(URL)", "\"언제야\" in text: return text[0:text.find(\"언제\")] elif \"어디\" in text: return", "= requests.get(URL, headers=headers) #resp = s.get(URL) results = [{\"title\" :", "SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] +", "+ \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['WIKI']['link'] + \"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\"", "Mobile Safari/537.36\" headers = {\"user-agent\" : USER_AGENT} #headers = {\"user-agent\"", "g.find('h3').text item = { \"title\": title, \"link\": link } results.append(item)", "\"알려줘\" in text: return text[0:text.find(\"알려줘\")] elif \"보여줘\" in text: return", "\"\\\"}],\\\"youtube\\\":[{\\\"title\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['title'] + \"\\\",\\\"link\\\":\\\"\" + SEARCH_RESULT['YOUTUBE']['link'] + \"\\\"}],\\\"Q_TYPE\\\":\\\"\" +", "re import time from bs4 import BeautifulSoup from requests import", "text[0:text.find(\"누구야\")] elif \"알려줘\" in text: return text[0:text.find(\"알려줘\")] elif \"보여줘\" in", "site:wikipedia.org\" downloadURL(URL) if SEARCH_RESULT['WIKI']['title'] == NO_RESULT: pWithoutTag = \"위키피디아가 없네요.", "SEARCH_RESULT['WIKI']['link'] = link elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] ==", "soup.find('p') 
pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip() pWithoutTag = re.sub('\"',", "f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"}] print(resp.status_code) if resp.status_code == 200: soup", "USER_AGENT, \"cache-contorl\" : \"no-cache\"} #s = Session() #s.headers.update(headers) resp =", "title SEARCH_RESULT['YOUTUBE']['link'] = link if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link']", "import sys, getopt import urllib import requests import requests_cache import", "Session sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath from", "else: resp = requests.get(SEARCH_RESULT['WIKI']['link']) if resp.status_code == 200: soup =", "+ \"\\\"}],\\\"Q_TYPE\\\":\\\"\" + str(Q_TYPE) + \"\\\",\\\"IMG_SRC\\\":\\\"\" + str(imgTag) + \"\\\"}\"", "f = open(filePath, 'w') f.write(json) f.close() def printTime(text): global start", "Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0\" # mobile", "and Q_TYPE > 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" downloadURL(URL) if", "results[0].get('title') SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\" return", "link = anchors[0]['href'] title = g.find('h3').text item = { \"title\":", "SEARCH_RESULT['FIRST']['link'] = results[0].get('link') else: SEARCH_RESULT['FIRST']['title'] = f\"resp.status_code {resp.status_code}\" return results", "sys.path.append(\"/home/taejoon1kim/BERT/my_bert\") from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath from utils.path", "'+') if Q_TYPE <= 2: URL = f\"https://google.com/search?q={query} site:wikipedia.org\" else", "TYPE : \", Q_TYPE) WIKI_URL = \"wikipedia.org\" YOUTUBE_URL = \"youtube.com/channel\"", ": f\"{NO_RESULT}\"}, \"YOUTUBE\" : {\"title\" : f\"{NO_RESULT}\", \"link\" : f\"{NO_RESULT}\"},", "start, Q_TYPE init_start = time.time() start = time.time() requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache') #if" ]
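A quick aside on the fragments above: the preprocessor branches simply strip a trailing Korean question word ("누구야", "알려줘", "뭐야", and so on) from the utterance before it becomes a Google query. For illustration only, a self-contained version of that step, without the module globals and with an assumed function name, could look like this:

# Illustrative sketch only: the original function's exact name, signature and
# full branch list are not visible in the row above, so this is an assumption.
QUESTION_SUFFIXES = ["누구야", "알려줘", "보여줘", "찾아줘", "언제", "어디", "뭐야"]

def strip_question_suffix(text: str) -> str:
    """Return the query text with a trailing question word removed, if present."""
    for suffix in QUESTION_SUFFIXES:
        idx = text.find(suffix)
        if idx != -1:
            return text[:idx]
    return text

# e.g. strip_question_suffix("아이유가 누구야") returns "아이유가 "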
[ "info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, [])) return info_list ''' course_list", "from xml.dom.minidom import Document, parse class InfoBatch: def __init__(self, title,", "parse class InfoBatch: def __init__(self, title, pre_node_titles): self.title = title", "in courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles", "doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name)", "as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def load_data_xml(file_path): info_list = [] doc", "course in courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data", "__init__(self, title, pre_node_titles): self.title = title self.pre_node_titles = pre_node_titles def", "for course in courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles =", "pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, [])) return", "Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming',", "Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization']) save_data_xml(course_list, 'resource/data/data.xml')", "course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, []))", "Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])", "','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course') after_course_name", "doc.createElement('after_course') after_course_name = ','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with", "course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented Programming'))", "course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture'))", "Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list,", "= doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name)", "'wb+') as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) 
def load_data_xml(file_path): info_list = []", "Oriented Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College", "after_course_name = ','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path,", "= pre_node_titles def save_data_xml(course_list, file_path): doc = Document() courses =", "def __init__(self, title, pre_node_titles): self.title = title self.pre_node_titles = pre_node_titles", "doc.appendChild(courses) for course in course_list: single_course = doc.createElement('course') courses.appendChild(single_course) single_course_name", "= course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title,", "= doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course') after_course_name = ','.join(course.after_course)", "course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list,", "doc.createElement('course_list') doc.appendChild(courses) for course in course_list: single_course = doc.createElement('course') courses.appendChild(single_course)", "= [] doc = parse(file_path) courses = doc.getElementsByTagName(\"course\") for course", "save_data_xml(course_list, file_path): doc = Document() courses = doc.createElement('course_list') doc.appendChild(courses) for", "single_course = doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name = doc.createTextNode(course.name)", "course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles))", "['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer Organization'))", "['Advance Math', 'Procedure Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list,", "Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list,", "pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError:", "f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def load_data_xml(file_path): info_list = [] doc =", "encoding='utf-8')) def load_data_xml(file_path): info_list = [] doc = parse(file_path) courses", "Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math'])", "course in course_list: single_course = 
doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name')", "Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented", "course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer", "doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8'))", "course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital", "open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def load_data_xml(file_path): info_list =", "single_course_name = doc.createElement('course_name') course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course =", "after_course.appendChild(course_name) with open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def load_data_xml(file_path):", "= doc.createElement('course_list') doc.appendChild(courses) for course in course_list: single_course = doc.createElement('course')", "course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic',", "pre_course.appendChild(course_name) after_course = doc.createElement('after_course') after_course_name = ','.join(course.after_course) course_name = doc.createTextNode(after_course_name)", "doc = parse(file_path) courses = doc.getElementsByTagName(\"course\") for course in courses:", "Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance", "title self.pre_node_titles = pre_node_titles def save_data_xml(course_list, file_path): doc = Document()", "[] doc = parse(file_path) courses = doc.getElementsByTagName(\"course\") for course in", "after_course = doc.createElement('after_course') after_course_name = ','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course)", "pre_node_titles def save_data_xml(course_list, file_path): doc = Document() courses = doc.createElement('course_list')", "f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def load_data_xml(file_path): info_list = [] doc = parse(file_path)", "courses = doc.createElement('course_list') doc.appendChild(courses) for course in course_list: single_course =", "pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, [])) return info_list ''' course_list =", "Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('College Physics'))", "= course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') 
info_list.append(InfoBatch(title,", "info_list.append(InfoBatch(title, [])) return info_list ''' course_list = [] course_list.append(Course('Advance Math'))", "doc.getElementsByTagName(\"course\") for course in courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles", "Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math',", "= ','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+')", "'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming',", "in course_list: single_course = doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name", "IndexError: info_list.append(InfoBatch(title, [])) return info_list ''' course_list = [] course_list.append(Course('Advance", "course_list = [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming'))", "self.title = title self.pre_node_titles = pre_node_titles def save_data_xml(course_list, file_path): doc", "doc = Document() courses = doc.createElement('course_list') doc.appendChild(courses) for course in", "xml.dom.minidom import Document, parse class InfoBatch: def __init__(self, title, pre_node_titles):", "except IndexError: info_list.append(InfoBatch(title, [])) return info_list ''' course_list = []", "for course in course_list: single_course = doc.createElement('course') courses.appendChild(single_course) single_course_name =", "= doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course)", "pre_node_titles): self.title = title self.pre_node_titles = pre_node_titles def save_data_xml(course_list, file_path):", "def save_data_xml(course_list, file_path): doc = Document() courses = doc.createElement('course_list') doc.appendChild(courses)", "single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def", "['Procedure Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented", "Math', 'Procedure Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance", "Document, parse class InfoBatch: def __init__(self, title, pre_node_titles): self.title =", "course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name =", "file_path): doc = Document() courses = doc.createElement('course_list') doc.appendChild(courses) for course", "return info_list ''' course_list = [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra'))", "Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 
'Procedure Oriented", "course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization'])", "pre_course = doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course)", "Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer", "doc.createElement('course_name') course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name", "title, pre_node_titles): self.title = title self.pre_node_titles = pre_node_titles def save_data_xml(course_list,", "'Procedure Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math',", "course_list: single_course = doc.createElement('course') courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name =", "single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name)", "['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization']) save_data_xml(course_list,", "self.pre_node_titles = pre_node_titles def save_data_xml(course_list, file_path): doc = Document() courses", "= Document() courses = doc.createElement('course_list') doc.appendChild(courses) for course in course_list:", "parse(file_path) courses = doc.getElementsByTagName(\"course\") for course in courses: title =", "course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure", "Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital", "doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course", "Oriented Programming', 'Digital Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure", "''' course_list = [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented", "Logic']) course_list.append(Course('Computer Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital", "Document() courses = doc.createElement('course_list') doc.appendChild(courses) for course in course_list: single_course", "course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic'])", "= doc.createElement('course_name') course_name = doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course')", "courses.appendChild(single_course) single_course_name = doc.createElement('course_name') course_name = 
doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course", "course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented", "= parse(file_path) courses = doc.getElementsByTagName(\"course\") for course in courses: title", "= doc.createElement('after_course') after_course_name = ','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name)", "with open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\\t', encoding='utf-8')) def load_data_xml(file_path): info_list", "'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization']) save_data_xml(course_list, 'resource/data/data.xml') '''", "course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure", "pre_course_name = ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course =", "InfoBatch: def __init__(self, title, pre_node_titles): self.title = title self.pre_node_titles =", "single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name =", "','.join(course.after_course) course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+') as", "= doc.getElementsByTagName(\"course\") for course in courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try:", "[])) return info_list ''' course_list = [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear", "course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented Programming')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])", "Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list,", "Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure", "course_name = doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+') as f:", "= pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, [])) return info_list", "= ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course')", "import Document, parse class InfoBatch: def __init__(self, title, pre_node_titles): self.title", "class InfoBatch: def __init__(self, title, pre_node_titles): self.title = title self.pre_node_titles", "course_name = doc.createTextNode(pre_course_name) 
single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course') after_course_name =", "doc.createTextNode(course.name) single_course.appendChild(single_course_name) single_course_name.appendChild(course_name) pre_course = doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name", "try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except", "= doc.createTextNode(after_course_name) single_course.appendChild(after_course) after_course.appendChild(course_name) with open(file_path, 'wb+') as f: f.write(doc.toprettyxml(indent='\\t',", "info_list ''' course_list = [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure", "['Procedure Oriented Programming']) course_list.append(Course('College Physics')) course_list[-1].add_pre_course(course_list, ['Advance Math']) course_list.append(Course('Digital Logic'))", "= title self.pre_node_titles = pre_node_titles def save_data_xml(course_list, file_path): doc =", "title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles = pre_node_titles.split(',')", "def load_data_xml(file_path): info_list = [] doc = parse(file_path) courses =", "pre_node_titles.split(',') info_list.append(InfoBatch(title, pre_node_titles)) except IndexError: info_list.append(InfoBatch(title, [])) return info_list '''", "= [] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object", "course_list.append(Course('Digital Logic')) course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming']) course_list.append(Course('Computer Organization')) course_list[-1].add_pre_course(course_list, ['Advance", "load_data_xml(file_path): info_list = [] doc = parse(file_path) courses = doc.getElementsByTagName(\"course\")", "[] course_list.append(Course('Advance Math')) course_list.append(Course('Linear Algebra')) course_list.append(Course('Procedure Oriented Programming')) course_list.append(Course('Object Oriented", "= doc.createElement('pre_course') pre_course_name = ','.join(course.pre_course) course_name = doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name)", "info_list = [] doc = parse(file_path) courses = doc.getElementsByTagName(\"course\") for", "Architecture')) course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer", "courses = doc.getElementsByTagName(\"course\") for course in courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data", "single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course') after_course_name = ','.join(course.after_course) course_name =", "courses: title = course.getElementsByTagName(\"course_name\")[0].childNodes[0].data try: pre_node_titles = course.getElementsByTagName(\"pre_node_titles\")[0].childNodes[0].data pre_node_titles =", "doc.createTextNode(pre_course_name) single_course.appendChild(pre_course) pre_course.appendChild(course_name) after_course = doc.createElement('after_course') 
after_course_name = ','.join(course.after_course) course_name" ]
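The commented-out example above builds Course objects, but the Course class itself is not part of this row. The sketch below is a minimal, assumed stand-in that is merely consistent with how save_data_xml and the example use it (name, pre_course, after_course, add_pre_course); it is not the project's actual class. Note, separately, that save_data_xml writes <pre_course> elements while load_data_xml looks for <pre_node_titles>, so the loader expects a different schema than the saver produces.

# Assumed minimal Course class for illustration; not the project's real implementation.
class Course:
    def __init__(self, name):
        self.name = name
        self.pre_course = []    # titles of prerequisite courses
        self.after_course = []  # titles of courses that list this one as a prerequisite

    def add_pre_course(self, course_list, pre_titles):
        # Record prerequisites by title and add the reverse edge on each prerequisite.
        for course in course_list:
            if course.name in pre_titles:
                self.pre_course.append(course.name)
                course.after_course.append(self.name)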
[ "import Command @pytest.fixture def output(target): return ('error: the following file", "foo', 'foo', ['git rm --cached foo', 'git rm -f foo']),", "'foo'), ('git rm foo bar', 'bar')]) def test_match(output, script, target):", "('git rm foo', 'foo', ['git rm --cached foo', 'git rm", "assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm foo', 'git rm foo", "not match(Command(script, '')) @pytest.mark.parametrize('script, target, new_command', [ ('git rm foo',", "['git rm --cached foo bar', 'git rm -f foo bar'])])", "rm foo bar', 'bar')]) def test_match(output, script, target): assert match(Command(script,", "['git rm --cached foo', 'git rm -f foo']), ('git rm", "local modifications:\\n {}\\n(use ' '--cached to keep the file, or", "@pytest.mark.parametrize('script, target', [ ('git rm foo', 'foo'), ('git rm foo", "to force removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git rm foo', 'foo'),", "@pytest.fixture def output(target): return ('error: the following file has local", "match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar',", "foo bar', 'git rm']) def test_not_match(script): assert not match(Command(script, ''))", "foo bar', 'bar', ['git rm --cached foo bar', 'git rm", "theheck.types import Command @pytest.fixture def output(target): return ('error: the following", "@pytest.mark.parametrize('script, target, new_command', [ ('git rm foo', 'foo', ['git rm", "rm']) def test_not_match(script): assert not match(Command(script, '')) @pytest.mark.parametrize('script, target, new_command',", "target, new_command', [ ('git rm foo', 'foo', ['git rm --cached", "-f foo']), ('git rm foo bar', 'bar', ['git rm --cached", "'--cached to keep the file, or -f to force removal)').format(target)", "from theheck.rules.git_rm_local_modifications import match, get_new_command from theheck.types import Command @pytest.fixture", "'git rm -f foo']), ('git rm foo bar', 'bar', ['git", "foo', 'git rm foo bar', 'git rm']) def test_not_match(script): assert", "foo bar', 'git rm -f foo bar'])]) def test_get_new_command(output, script,", "force removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git rm foo', 'foo'), ('git", "has local modifications:\\n {}\\n(use ' '--cached to keep the file,", "'bar', ['git rm --cached foo bar', 'git rm -f foo", "rm foo', 'foo', ['git rm --cached foo', 'git rm -f", "following file has local modifications:\\n {}\\n(use ' '--cached to keep", "foo', 'foo'), ('git rm foo bar', 'bar')]) def test_match(output, script,", "rm -f foo bar'])]) def test_get_new_command(output, script, target, new_command): assert", "target): assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm foo', 'git rm", "rm foo', 'git rm foo bar', 'git rm']) def test_not_match(script):", "--cached foo', 'git rm -f foo']), ('git rm foo bar',", "keep the file, or -f to force removal)').format(target) @pytest.mark.parametrize('script, target',", "bar', 'bar')]) def test_match(output, script, target): assert match(Command(script, output)) @pytest.mark.parametrize('script',", "('git rm foo bar', 'bar', ['git rm --cached foo bar',", "('git rm foo bar', 'bar')]) def test_match(output, script, target): assert", "'')) @pytest.mark.parametrize('script, target, new_command', [ ('git rm foo', 'foo', ['git", "rm foo bar', 'git rm']) def test_not_match(script): assert not match(Command(script,", "file has local modifications:\\n {}\\n(use ' '--cached to keep the", "def test_match(output, 
script, target): assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm", "'git rm']) def test_not_match(script): assert not match(Command(script, '')) @pytest.mark.parametrize('script, target,", "['git rm foo', 'git rm foo bar', 'git rm']) def", "modifications:\\n {}\\n(use ' '--cached to keep the file, or -f", "foo', 'git rm -f foo']), ('git rm foo bar', 'bar',", "file, or -f to force removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git", "Command @pytest.fixture def output(target): return ('error: the following file has", "removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git rm foo', 'foo'), ('git rm", "test_match(output, script, target): assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm foo',", "--cached foo bar', 'git rm -f foo bar'])]) def test_get_new_command(output,", "output(target): return ('error: the following file has local modifications:\\n {}\\n(use", "rm --cached foo bar', 'git rm -f foo bar'])]) def", "output)) @pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git", "{}\\n(use ' '--cached to keep the file, or -f to", "rm foo bar', 'bar', ['git rm --cached foo bar', 'git", "' '--cached to keep the file, or -f to force", "target', [ ('git rm foo', 'foo'), ('git rm foo bar',", "rm --cached foo', 'git rm -f foo']), ('git rm foo", "bar', 'git rm']) def test_not_match(script): assert not match(Command(script, '')) @pytest.mark.parametrize('script,", "pytest from theheck.rules.git_rm_local_modifications import match, get_new_command from theheck.types import Command", "rm foo', 'foo'), ('git rm foo bar', 'bar')]) def test_match(output,", "foo bar', 'bar')]) def test_match(output, script, target): assert match(Command(script, output))", "new_command', [ ('git rm foo', 'foo', ['git rm --cached foo',", "def test_get_new_command(output, script, target, new_command): assert get_new_command(Command(script, output)) == new_command", "rm -f foo']), ('git rm foo bar', 'bar', ['git rm", "'bar')]) def test_match(output, script, target): assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git", "foo']), ('git rm foo bar', 'bar', ['git rm --cached foo", "to keep the file, or -f to force removal)').format(target) @pytest.mark.parametrize('script,", "[ ('git rm foo', 'foo', ['git rm --cached foo', 'git", "@pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git rm'])", "script, target): assert match(Command(script, output)) @pytest.mark.parametrize('script', ['git rm foo', 'git", "-f to force removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git rm foo',", "('git rm foo', 'foo'), ('git rm foo bar', 'bar')]) def", "theheck.rules.git_rm_local_modifications import match, get_new_command from theheck.types import Command @pytest.fixture def", "assert not match(Command(script, '')) @pytest.mark.parametrize('script, target, new_command', [ ('git rm", "'git rm foo bar', 'git rm']) def test_not_match(script): assert not", "the file, or -f to force removal)').format(target) @pytest.mark.parametrize('script, target', [", "import pytest from theheck.rules.git_rm_local_modifications import match, get_new_command from theheck.types import", "bar', 'git rm -f foo bar'])]) def test_get_new_command(output, script, target,", "match(Command(script, '')) @pytest.mark.parametrize('script, target, new_command', [ ('git rm foo', 'foo',", "def test_not_match(script): assert not match(Command(script, '')) 
@pytest.mark.parametrize('script, target, new_command', [", "bar'])]) def test_get_new_command(output, script, target, new_command): assert get_new_command(Command(script, output)) ==", "def output(target): return ('error: the following file has local modifications:\\n", "foo bar'])]) def test_get_new_command(output, script, target, new_command): assert get_new_command(Command(script, output))", "-f foo bar'])]) def test_get_new_command(output, script, target, new_command): assert get_new_command(Command(script,", "match, get_new_command from theheck.types import Command @pytest.fixture def output(target): return", "import match, get_new_command from theheck.types import Command @pytest.fixture def output(target):", "bar', 'bar', ['git rm --cached foo bar', 'git rm -f", "'git rm -f foo bar'])]) def test_get_new_command(output, script, target, new_command):", "get_new_command from theheck.types import Command @pytest.fixture def output(target): return ('error:", "return ('error: the following file has local modifications:\\n {}\\n(use '", "from theheck.types import Command @pytest.fixture def output(target): return ('error: the", "('error: the following file has local modifications:\\n {}\\n(use ' '--cached", "test_not_match(script): assert not match(Command(script, '')) @pytest.mark.parametrize('script, target, new_command', [ ('git", "or -f to force removal)').format(target) @pytest.mark.parametrize('script, target', [ ('git rm", "the following file has local modifications:\\n {}\\n(use ' '--cached to", "'foo', ['git rm --cached foo', 'git rm -f foo']), ('git", "[ ('git rm foo', 'foo'), ('git rm foo bar', 'bar')])" ]
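The rule module these tests import (theheck.rules.git_rm_local_modifications) is not included in this row. A minimal match/get_new_command pair that would satisfy the expectations above might look like the sketch below; only the two function names are taken from the import line, and the bodies are an assumption rather than the project's actual code.

# Assumed sketch of a rule consistent with the tests above; not theheck's actual implementation.
def match(command):
    # Suggest a fix only for `git rm` commands that failed because of local modifications.
    return 'rm' in command.script.split() and 'local modifications' in command.output

def get_new_command(command):
    # Offer both variants git itself suggests: keep the file (--cached) or force removal (-f).
    parts = command.script.split()
    index = parts.index('rm') + 1
    cached = ' '.join(parts[:index] + ['--cached'] + parts[index:])
    forced = ' '.join(parts[:index] + ['-f'] + parts[index:])
    return [cached, forced]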
[ "import JSONResponse from fastapi.staticfiles import StaticFiles from starlette.responses import PlainTextResponse,", "json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request logger \"\"\"", "@data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async", "json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer'] del", "logging import json_logging import tomlkit import uvicorn from fastapi import", "JavaScript and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app = FastAPI(docs_url=None,", "import EmptyResultSetException \"\"\" Self-hosting JavaScript and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs", "\"data-service\" del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg'] del json_log_object['module'] del", "fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import", "import json_logging import tomlkit import uvicorn from fastapi import FastAPI,", "tomlkit import uvicorn from fastapi import FastAPI, status from fastapi.encoders", "name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async def custom_swagger_ui_html(): return get_swagger_ui_html(", "del json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip']", "from data_service.config import config from data_service.core.processor import NotFoundException from data_service.core.filters", "\"\"\" def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util)", "openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta():", "\" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta(): with open('./pyproject.toml') as", "json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc)", "del json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return json_log_object", "record.getMessage() }) if \"exc_info\" in json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del", "json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return", "request logger \"\"\" def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONRequestLogFormatter,", "redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta(): with open('./pyproject.toml') as pyproject: 
file_contents =", "status from fastapi.encoders import jsonable_encoder from fastapi.openapi.docs import ( get_redoc_html,", "unknown_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal Server Error\",", "import tomlkit import uvicorn from fastapi import FastAPI, status from", "PlainTextResponse(\"Internal Server Error\", status_code=500) @data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app,", "data_service.api.observability_api import observability_router from data_service.config import config from data_service.core.processor import", "get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def", "json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user']", "\"\"\" Self-hosting JavaScript and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app", "fastapi.staticfiles import StaticFiles from starlette.responses import PlainTextResponse, Response from data_service.api.data_api", "json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host')", "include_in_schema=False) async def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \"", "json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started", "json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"]", "swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\",", "def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({", "Self-hosting JavaScript and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app =", "openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\",", "status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such datastructure\"}) ) @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request,", "docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" 
data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")", "del json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type']", "Response from data_service.api.data_api import data_router from data_service.api.observability_api import observability_router from", "str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\" del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg']", "= json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"] =", "Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def", "= super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) json_log_object[\"@timestamp\"] =", "Error\", status_code=500) @data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO)", "return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such datastructure\"}) ) @data_service_app.exception_handler(Exception) async", "def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log =", "get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles", "from fastapi.encoders import jsonable_encoder from fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html,", "return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc): log", "del json_log_object['msg'] del json_log_object['module'] del json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter):", "async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async def redoc_html():", "return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application", "json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request logger \"\"\" def _format_log_object(self,", "data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\",", "swagger_js_url=\"/static/swagger-ui-bundle.js\", 
swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html()", ") @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc)", "= json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] =", "logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal Server Error\", status_code=500) @data_service_app.on_event(\"startup\") def startup_event():", "def _get_project_meta(): with open('./pyproject.toml') as pyproject: file_contents = pyproject.read() return", "del json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b']", "\"exc_info\" in json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"] =", "@data_service_app.get(\"/docs\", include_in_schema=False) async def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title +", "return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", )", "as pyproject: file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta()", "= json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"] =", "def empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT", "record.getMessage() }) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"] =", "del json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc): log", "JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such datastructure\"}) ) @data_service_app.exception_handler(Exception) async def", "import FastAPI, status from fastapi.encoders import jsonable_encoder from fastapi.openapi.docs import", "json_log_object.pop('written_at') json_log_object[\"loggerName\"] = json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\"", "logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if __name__", "import data_router from data_service.api.observability_api import observability_router from data_service.config import config", "@data_service_app.get(\"/redoc\", include_in_schema=False) async def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title +", "json_log_object['response_content_type'] del 
json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc):", "get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import JSONResponse from fastapi.staticfiles", "get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import JSONResponse from fastapi.staticfiles import", "https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router)", "( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import JSONResponse from", "json_logging import tomlkit import uvicorn from fastapi import FastAPI, status", "= logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal Server Error\", status_code=500) @data_service_app.on_event(\"startup\") def", "config from data_service.core.processor import NotFoundException from data_service.core.filters import EmptyResultSetException \"\"\"", "import PlainTextResponse, Response from data_service.api.data_api import data_router from data_service.api.observability_api import", "redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\",", "import observability_router from data_service.config import config from data_service.core.processor import NotFoundException", "json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user'] del", "import logging import json_logging import tomlkit import uvicorn from fastapi", "uvicorn from fastapi import FastAPI, status from fastapi.encoders import jsonable_encoder", "FastAPI, status from fastapi.encoders import jsonable_encoder from fastapi.openapi.docs import (", "request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() })", "def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({", "_format_log_object(self, record, request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ \"message\":", "empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT )", "return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request logger \"\"\" def", "application logger \"\"\" def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONLog,", "json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"] =", "json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type'] del", "FastAPI(docs_url=None, redoc_url=None) 
data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async", "+ \" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta(): with open('./pyproject.toml')", "pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application logger \"\"\"", "CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request logger \"\"\" def _format_log_object(self, record, request_util):", "data_service.core.processor import NotFoundException from data_service.core.filters import EmptyResultSetException \"\"\" Self-hosting JavaScript", "starlette.responses import PlainTextResponse, Response from data_service.api.data_api import data_router from data_service.api.observability_api", "json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg'] del json_log_object['module'] del json_log_object['line_no'] return", "json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException)", "= super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) if \"exc_info\"", "title=data_service_app.title + \" - ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta(): with", "json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\"", "json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del", "del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for']", "data_service.api.data_api import data_router from data_service.api.observability_api import observability_router from data_service.config import", "async def unknown_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal", "fastapi.encoders import jsonable_encoder from fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html,", "status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc): log = logging.getLogger(__name__)", "json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status')", "+ \" - Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url,", "exc): log = logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException)", "del json_log_object['module'] del 
json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized", "json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) json_log_object[\"@timestamp\"]", "custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started data-service')", "request_util) json_log_object.update({ \"message\": record.getMessage() }) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"] =", "PlainTextResponse, Response from data_service.api.data_api import data_router from data_service.api.observability_api import observability_router", "log.info('Started data-service') log.info(config.get_settings().print()) if __name__ == \"__main__\": uvicorn.run(data_service_app, host=\"0.0.0.0\", port=8000)", "import uvicorn from fastapi import FastAPI, status from fastapi.encoders import", "data_service.config import config from data_service.core.processor import NotFoundException from data_service.core.filters import", "NotFoundException from data_service.core.filters import EmptyResultSetException \"\"\" Self-hosting JavaScript and CSS", "return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url,", "@data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return", "= \"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\" del json_log_object['written_ts']", "= json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer']", "redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async def", ") @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False)", "from data_service.api.data_api import data_router from data_service.api.observability_api import observability_router from data_service.config", "logger \"\"\" def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record,", "JSONResponse from fastapi.staticfiles import StaticFiles from starlette.responses import PlainTextResponse, Response", "async def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" -", "_get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application logger \"\"\" def _format_log_object(self,", "if \"exc_info\" in json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"]", "= 
json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"] =", "import StaticFiles from starlette.responses import PlainTextResponse, Response from data_service.api.data_api import", "pyproject: file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class", "del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg'] del json_log_object['module'] del json_log_object['line_no']", "del json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"] = json_log_object.pop('logger') json_log_object[\"levelName\"] =", "= logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if __name__ == \"__main__\": uvicorn.run(data_service_app,", "custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - Swagger UI\",", "def unknown_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal Server", "_get_project_meta(): with open('./pyproject.toml') as pyproject: file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry']", "@data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log", "content=jsonable_encoder({\"detail\": \"No such datastructure\"}) ) @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc):", "@data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return", "custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if", "from data_service.api.observability_api import observability_router from data_service.config import config from data_service.core.processor", "request_util) json_log_object.update({ \"message\": record.getMessage() }) if \"exc_info\" in json_log_object: json_log_object[\"error.stack\"]", "json_log_object.update({ \"message\": record.getMessage() }) if \"exc_info\" in json_log_object: json_log_object[\"error.stack\"] =", "del json_log_object['type'] del json_log_object['msg'] del json_log_object['module'] del json_log_object['line_no'] return json_log_object", "\"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\" del json_log_object['written_ts'] del", "data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url,", "def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - ReDoc\",", "data_router from data_service.api.observability_api import observability_router from data_service.config import 
config from", "tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application logger", "import NotFoundException from data_service.core.filters import EmptyResultSetException \"\"\" Self-hosting JavaScript and", "\"message\": record.getMessage() }) if \"exc_info\" in json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info')", "del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request,", "data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title", "= _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application logger \"\"\" def", "StaticFiles from starlette.responses import PlainTextResponse, Response from data_service.api.data_api import data_router", "class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application logger \"\"\" def _format_log_object(self, record,", "del json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at']", "def not_found_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND,", "from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles from starlette.responses", "json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) if", "- ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta(): with open('./pyproject.toml') as pyproject:", "json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\" del", "UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect():", "file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter):", "not_found_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\":", "\"No such datastructure\"}) ) @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc): log", "@data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return", "CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\",", "include_in_schema=False) async def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, 
title=data_service_app.title + \"", "json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del json_log_object['written_ts'] del", "super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) if \"exc_info\" in", "self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) if \"exc_info\" in json_log_object:", "json_log_object[\"serviceName\"] = \"data-service\" del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg'] del", "and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app = FastAPI(docs_url=None, redoc_url=None)", "\"\"\" Customized request logger \"\"\" def _format_log_object(self, record, request_util): json_log_object", "datastructure\"}) ) @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc): log = logging.getLogger(__name__)", "in json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at')", "with open('./pyproject.toml') as pyproject: file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta", "data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async def custom_swagger_ui_html():", "json_log_object['type'] del json_log_object['msg'] del json_log_object['module'] del json_log_object['line_no'] return json_log_object class", "def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async def redoc_html(): return", "= json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"] =", "from fastapi.staticfiles import StaticFiles from starlette.responses import PlainTextResponse, Response from", "del json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b']", "ReDoc\", redoc_js_url=\"/static/redoc.standalone.js\", ) def _get_project_meta(): with open('./pyproject.toml') as pyproject: file_contents", "= json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"] = json_log_object.pop('logger')", "= json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type'] del", ") from fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles from", "logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such datastructure\"}) )", "= \"data-service\" del json_log_object['written_ts'] del json_log_object['type'] del json_log_object['msg'] del json_log_object['module']", 
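# A rough sketch of the application-log shape produced by CustomJSONLog after the renames and
# deletions above. The concrete values are illustrative assumptions, not captured output, and
# _EXAMPLE_APP_LOG_RECORD is not referenced anywhere else in the service.
_EXAMPLE_APP_LOG_RECORD = {
    "@timestamp": "2021-01-01T12:00:00.000Z",  # renamed from json_logging's "written_at"
    "loggerName": "data_service",              # renamed from "logger"
    "levelName": "INFO",                       # renamed from "level"
    "schemaVersion": "v3",
    "serviceVersion": "0.1.0",                 # read from pyproject.toml via pkg_meta
    "serviceName": "data-service",
    "message": "Started data-service",
}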
"super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at')", "record, request_util): json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage()", "include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async def", "data_service.core.filters import EmptyResultSetException \"\"\" Self-hosting JavaScript and CSS for docs", "oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async def swagger_ui_redirect(): return", "- Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False) async", ") def _get_project_meta(): with open('./pyproject.toml') as pyproject: file_contents = pyproject.read()", "json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"]", "= logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def", "\" - Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", swagger_css_url=\"/static/swagger-ui.css\", ) @data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False)", "fastapi import FastAPI, status from fastapi.encoders import jsonable_encoder from fastapi.openapi.docs", "import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses import JSONResponse", "status_code=500) @data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger()", "log = logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async", "get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title", "json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b'] del", "del json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async", "<reponame>statisticsnorway/microdata-data-service import logging import json_logging import tomlkit import uvicorn from", "json_log_object['msg'] del 
json_log_object['module'] del json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\"", "\"\"\" Customized application logger \"\"\" def _format_log_object(self, record, request_util): json_log_object", "= json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del json_log_object['written_ts']", "}) if \"exc_info\" in json_log_object: json_log_object[\"error.stack\"] = json_log_object.pop('exc_info') del json_log_object['filename']", "\"\"\" def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util)", "Customized application logger \"\"\" def _format_log_object(self, record, request_util): json_log_object =", "async def redoc_html(): return get_redoc_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" -", "_format_log_object(self, record, request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ \"message\":", "log = logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if __name__ == \"__main__\":", "}) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request')", "logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if __name__ == \"__main__\": uvicorn.run(data_service_app, host=\"0.0.0.0\",", "Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc): log =", "from fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from fastapi.responses", "logger \"\"\" def _format_log_object(self, record, request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record,", "def custom_swagger_ui_html(): return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - Swagger", "json_log_object['written_ts'] del json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for'] del", "json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at'] del", "log = logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such", "json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"]", "= json_log_object.pop('written_at') json_log_object[\"loggerName\"] = json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"] =", "del json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at']", "Customized request 
logger \"\"\" def _format_log_object(self, record, request_util): json_log_object =", "StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False) async def custom_swagger_ui_html(): return", "log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request, exc):", "json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del json_log_object['written_ts'] del json_log_object['type']", "jsonable_encoder from fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, ) from", "log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such datastructure\"}) ) @data_service_app.exception_handler(Exception)", "exc): log = logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal Server Error\", status_code=500)", "startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__)", "async def empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return Response(", "json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"] = json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"]", "= logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No such datastructure\"})", "for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\" data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"),", "= pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\"", "= str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\" del json_log_object['written_ts'] del json_log_object['type'] del", "exc): log = logging.getLogger(__name__) log.exception(exc) return JSONResponse( status_code=status.HTTP_404_NOT_FOUND, content=jsonable_encoder({\"detail\": \"No", "from data_service.core.processor import NotFoundException from data_service.core.filters import EmptyResultSetException \"\"\" Self-hosting", "record, request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage()", "del json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request logger", "swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @data_service_app.get(\"/redoc\", include_in_schema=False) async def redoc_html(): return get_redoc_html(", "json_log_object['response_sent_at'] return json_log_object 
@data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc): log =", "json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print()) if __name__ ==", "json_log_object['response_size_b'] del json_log_object['response_content_type'] del json_log_object['response_sent_at'] return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def", "return get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url,", "from fastapi import FastAPI, status from fastapi.encoders import jsonable_encoder from", "json_log_object['module'] del json_log_object['line_no'] return json_log_object class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request", "request_util): json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() })", "from data_service.core.filters import EmptyResultSetException \"\"\" Self-hosting JavaScript and CSS for", "from starlette.responses import PlainTextResponse, Response from data_service.api.data_api import data_router from", "CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized application logger \"\"\" def _format_log_object(self, record, request_util):", "fastapi.responses import JSONResponse from fastapi.staticfiles import StaticFiles from starlette.responses import", "del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port']", "json_log_object[\"loggerName\"] = json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"]", "async def not_found_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc) return JSONResponse(", "Server Error\", status_code=500) @data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog) json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter)", "open('./pyproject.toml') as pyproject: file_contents = pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta =", "logging.getLogger(__name__) log.exception(exc) return Response( status_code=status.HTTP_204_NO_CONTENT ) @data_service_app.exception_handler(NotFoundException) async def not_found_exception_handler(request,", "json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"]", "del json_log_object['type'] del json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol']", "json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms') json_log_object[\"statusCode\"] = json_log_object.pop('response_status') del", "json_log_object.pop('exc_info') del json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"] = 
json_log_object.pop('logger') json_log_object[\"levelName\"]", "import config from data_service.core.processor import NotFoundException from data_service.core.filters import EmptyResultSetException", "get_swagger_ui_html( openapi_url=data_service_app.openapi_url, title=data_service_app.title + \" - Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\",", "import jsonable_encoder from fastapi.openapi.docs import ( get_redoc_html, get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html, )", "observability_router from data_service.config import config from data_service.core.processor import NotFoundException from", "\"\"\" data_service_app = FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router)", "json_log_object['remote_user'] del json_log_object['referer'] del json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip'] del", "json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"]", "= FastAPI(docs_url=None, redoc_url=None) data_service_app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") data_service_app.include_router(data_router) data_service_app.include_router(observability_router) @data_service_app.get(\"/docs\", include_in_schema=False)", "\"message\": record.getMessage() }) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id') json_log_object[\"url\"]", "pyproject.read() return tomlkit.parse(file_contents)['tool']['poetry'] pkg_meta = _get_project_meta() class CustomJSONLog(json_logging.JSONLogFormatter): \"\"\" Customized", "json_log_object.update({ \"message\": record.getMessage() }) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"] = json_log_object.pop('correlation_id')", "json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level') json_log_object[\"schemaVersion\"] = \"v3\" json_log_object[\"serviceVersion\"] = str(pkg_meta['version'])", "such datastructure\"}) ) @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc): log =", "log.exception(exc) return PlainTextResponse(\"Internal Server Error\", status_code=500) @data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True,", "self)._format_log_object(record, request_util) json_log_object.update({ \"message\": record.getMessage() }) json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"xRequestId\"]", "json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port'] del json_log_object['request_received_at'] del json_log_object['response_size_b'] del", "return PlainTextResponse(\"Internal Server Error\", status_code=500) @data_service_app.on_event(\"startup\") def startup_event(): json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog)", "title=data_service_app.title + \" - Swagger UI\", oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url, swagger_js_url=\"/static/swagger-ui-bundle.js\", 
swagger_css_url=\"/static/swagger-ui.css\", )", "class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter): \"\"\" Customized request logger \"\"\" def _format_log_object(self, record,", "json_log_object.pop('correlation_id') json_log_object[\"url\"] = json_log_object.pop('request') json_log_object[\"source_host\"] = json_log_object.pop('remote_host') json_log_object[\"responseTime\"] = json_log_object.pop('response_time_ms')", "json_log_object[\"serviceVersion\"] = str(pkg_meta['version']) json_log_object[\"serviceName\"] = \"data-service\" del json_log_object['written_ts'] del json_log_object['type']", "log = logging.getLogger(__name__) log.exception(exc) return PlainTextResponse(\"Internal Server Error\", status_code=500) @data_service_app.on_event(\"startup\")", ") @data_service_app.exception_handler(Exception) async def unknown_exception_handler(request, exc): log = logging.getLogger(__name__) log.exception(exc)", "json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter) logging.basicConfig(level=logging.INFO) json_logging.config_root_logger() log = logging.getLogger(__name__) log.info('Started data-service') log.info(config.get_settings().print())", "EmptyResultSetException \"\"\" Self-hosting JavaScript and CSS for docs https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs \"\"\"", "return json_log_object @data_service_app.exception_handler(EmptyResultSetException) async def empty_result_set_exception_handler(request, exc): log = logging.getLogger(__name__)", "json_log_object['x_forwarded_for'] del json_log_object['protocol'] del json_log_object['remote_ip'] del json_log_object['request_size_b'] del json_log_object['remote_port'] del", "json_log_object['filename'] json_log_object[\"@timestamp\"] = json_log_object.pop('written_at') json_log_object[\"loggerName\"] = json_log_object.pop('logger') json_log_object[\"levelName\"] = json_log_object.pop('level')" ]
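# --- Usage sketch (not part of the service) -------------------------------------------------
# A minimal example of how the self-hosted docs routes and exception handlers above could be
# smoke-tested with FastAPI's TestClient. The helper below is an illustrative assumption, not
# an existing test, and it requires the ./static assets referenced by the mount to be present.
def _smoke_test_docs_routes():
    from fastapi.testclient import TestClient

    client = TestClient(data_service_app)
    # The self-hosted Swagger UI and ReDoc pages should both render from the /static mount.
    assert client.get("/docs").status_code == 200
    assert client.get("/redoc").status_code == 200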
[ "self.random_state, ) ] walk.append(next) else: break return walk @staticmethod def", "edges\" ) if is_directed: for edge in graph.edges(): current_edge +=", "the list, default to a walk length of 1 if", "nodes degree. If the node is in the bottom 20", "# Percentiles will be provided if we are using the", "edges\") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) else: for edge in graph.edges():", "logging.info(f\"Completed preprocessing of transition probabilities for edges\") self.alias_nodes = alias_nodes", "= smaller.pop() large = larger.pop() sampled_probabilities[small] = large alias[large] =", "int = 80, return_hyperparameter: float = 1.0, inout_hyperparameter: float =", "can happen depending on the percentiles used if new_walk_length <", "same number of walks as a high degree node (which", "walk_length=walk_length, start_node=node, degree_percentiles=degree_percentiles, ) ) return walks def _get_alias_edge(self, source:", "sentence. Default is 10. workers : int Use these many", "the code is to default the bottom 20% of to", "for edge in graph.edges(): current_edge += 1 if current_edge >", "reference implementation of node2vec comes from Aditya Grover from https://github.com/aditya-grover/node2vec/.", "(q). Default is 1.0 dimensions : int Dimensionality of the", "Given a node's degree, determine the length of a walk", "in the bottom 20 percentile, default to a walk length", "1.0, dimensions: int = 128, window_size: int = 10, workers:", "the walk. \"\"\" new_walk_length = None for i, percentile in", "> 0: if len(walk) == 1: walk.append( current_neighbors[ _alias_draw( alias_nodes[current][0],", "threads to train the model. Default is 8. iterations :", "the same number of walks as a high degree node", "multigraph to be treated for the \" \"purposes of this", "* quotient: bucket += 1 logging.info(f\"Completed {current_node} / {total_nodes} vertices\")", "to 1. Otherwise, if the degree is greater than the", "= int(total_nodes / 10) logging.info( f\"Beginning preprocessing of transition probabilities", "_alias_setup(probabilities: List[float]): \"\"\" Compute utility lists for non-uniform sampling from", "if not new_walk_length: new_walk_length = max_walk_length # a walk length", "= walk[-2] next = current_neighbors[ _alias_draw( alias_edges[(prev, current)][0], alias_edges[(prev, current)][1],", "model. Default is 8. iterations : int Number of epochs", ") ] ) else: prev = walk[-2] next = current_neighbors[", "0.0: raise ValueError(f\"{name} must be >= 0.0\") def _preconditions( graph:", "a smaller breadth of paths # (due to their being", "edge setup lists for a given edge. \"\"\" graph =", "value: float): if not isinstance(value, float): raise TypeError(f\"{name} must be", "numpy as np from ..utils import remap_node_ids def node2vec_embed( graph:", "[start_node] # Percentiles will be provided if we are using", "a walk that should be used. 
If the degree is", "degree is above the last percentile if not new_walk_length: new_walk_length", "for u_prob in unnormalized_probs] return _alias_setup(normalized_probs) def _preprocess_transition_probabilities(self, weight_default: float", "raise TypeError(f\"{name} must be a float\") if value < 0.0:", "\"\"\" graph = self.graph p = self.p q = self.q", "alias def _alias_draw( probabilities: List[float], alias: List[float], random_state: np.random.RandomState ):", "num_walks, walk_length, return_hyperparameter, inout_hyperparameter, dimensions, window_size, workers, iterations, interpolate_walk_lengths_by_node_degree, )", "with each row index corresponding to the embedding for each", "int(total_edges / 10) logging.info( f\"Beginning preprocessing of transition probabilities for", "graph = self.graph is_directed = self.is_directed alias_nodes = {} total_nodes", "default it to the max_walk_length. If it falls in the", "and predicted word within a sentence. Default is 10. workers", "word vectors. Default is 128. window_size : int Maximum distance", "\"\"\" Draw sample from a non-uniform discrete distribution using alias", "it falls in the middle, do a linear interpolation to", "must be >= 0.0\") def _preconditions( graph: Union[nx.Graph, nx.DiGraph], num_walks:", "\"node2vec: Scalable Feature Learning for Networks.\" Knowledge Discovery and Data", "inner state object for constructing the random walks Parameters ----------", "Word2Vec walks = [list(map(str, walk)) for walk in walks] #", "``PYTHONHASHSEED`` must be set to control hash randomization. Returns -------", "to eliminate ordering jitter from OS thread scheduling. In addition", "multigraph should be turned into a non-multigraph so that the", "graph or digraph. A multigraph should be turned into a", "10 percentile, use ``walk_length``. If it is in the 20-80", "float): raise TypeError(f\"{name} must be a float\") if value <", "= time.time() logging.info(f\"Starting at time {str(start)}\") node2vec_graph._preprocess_transition_probabilities() logging.info(f\"Simulating walks on", "unnormalized_probs ] alias_nodes[node] = _alias_setup(normalized_probs) logging.info( f\"Completed preprocessing of transition", "(SGD) interpolate_walk_lengths_by_node_degree : bool Use a dynamic walk length that", "the first element of the percentiles list, default the walk", "= random_state def node2vec_walk( self, walk_length: int, start_node: Any, degree_percentiles:", "in. if degree <= percentile: new_walk_length = max_walk_length * ((i", "between 1 and ``walk_length``. This will reduce lower degree nodes", "List[Any]] A tuple containing a matrix, with each row index", "nx.DiGraph] A networkx graph or digraph. A multigraph should be", "nodes to choose from) and will bias your resulting Word2Vec", "workers, iterations, random_seed ) end = time.time() logging.info( f\"Completed. Ending", "-> Tuple[np.array, List[Any]]: \"\"\" Generates a node2vec embedding from a", "20% of to a minimal walk length, default the top", "otherwise CBOW workers=workers, iter=iterations, seed=random_seed, ) return model class _Node2VecGraph:", "A networkx graph return_hyperparameter : float Return hyperparameter inout_hyperparameter :", "the length of the walk. 
\"\"\" new_walk_length = None for", "_, degree in graph.degree()], [x for x in range(20, 90,", "[] nodes = list(graph.nodes()) degree_percentiles: Optional[np.ndarray] = None if interpolate_walk_lengths_by_node_degree:", "sg=1, # Training algorithm: 1 for skip-gram; otherwise CBOW workers=workers,", "preprocessing of transition probabilities for {total_edges} edges\" ) if is_directed:", "embedding. If a low degree node has the same number", "---------- .. [1] <NAME> and <NAME> \"node2vec: Scalable Feature Learning", "also limit to a single worker thread (`workers=1`), to eliminate", "of the list, default it to the max_walk_length. If it", "node2vec_graph = _Node2VecGraph( graph, return_hyperparameter, inout_hyperparameter, random_state ) logging.info( f\"Starting", "enumerate(percentiles): # if we are below the first percentile in", "= [] for i, prob in enumerate(probabilities): alias[i] = number_of_outcomes", "self.q unnormalized_probs = [] for destination_neighbor in sorted(graph.neighbors(destination)): if destination_neighbor", "be set to control hash randomization. Returns ------- Tuple[np.array, List[Any]]", "return 1 # otherwise, find which bucket we are going", "inout_hyperparameter self.random_state = random_state def node2vec_walk( self, walk_length: int, start_node:", ": float Return hyperparameter inout_hyperparameter : float Inout hyperparameter random_state", "Tuple, Union import networkx as nx import numpy as np", "and will bias your resulting Word2Vec embedding if degree_percentiles is", "_alias_draw( alias_edges[(prev, current)][0], alias_edges[(prev, current)][1], self.random_state, ) ] walk.append(next) else:", "\" \"multigraph with different behaviors, we insist that the caller", "matrix, with each row index corresponding to the embedding for", "current)][0], alias_edges[(prev, current)][1], self.random_state, ) ] walk.append(next) else: break return", "per source. Default is 10. walk_length: int Length of walk", "node's degree, determine the length of a walk that should", "transition probabilities for edges\") self.alias_nodes = alias_nodes self.alias_edges = alias_edges", "= nx.degree(graph, start_node) walk_length = self._get_walk_length_interpolated( degree, degree_percentiles, walk_length )", "None for i, percentile in enumerate(percentiles): # if we are", "typing import Any, List, Optional, Tuple, Union import networkx as", "not new_walk_length: new_walk_length = max_walk_length # a walk length of", "source: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) / p ) elif graph.has_edge(destination_neighbor, source):", "int, workers: int, iterations: int, random_seed: Optional[int], ): \"\"\" Learn", "a non-multigraph so that the calling user properly handles the", ") self.random_state.shuffle(nodes) for node in nodes: walks.append( self.node2vec_walk( walk_length=walk_length, start_node=node,", "walks = [list(map(str, walk)) for walk in walks] # Documentation", "else: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) / q ) norm_const = sum(unnormalized_probs)", "in the matrix. The matrix and vector are positionally correlated.", "seed=random_seed, ) return model class _Node2VecGraph: \"\"\" Temporary inner state", "your higher degree nodes. 
random_seed : int Seed to be", "node in labels]), labels, ) def _assert_is_positive_int(name: str, value: int):", "walks = [] nodes = list(graph.nodes()) degree_percentiles: Optional[np.ndarray] = None", "min_count=0, sg=1, # Training algorithm: 1 for skip-gram; otherwise CBOW", "Generates a node2vec embedding from a given graph. Will follow", "Default is 80. return_hyperparameter : float Return hyperparameter (p). Default", "in range(20, 90, 10)] ) for walk_iteration in range(num_walks): logging.info(", "if current_edge > bucket * quotient: bucket += 1 logging.info(f\"Completed", "is in the bottom 20 percentile, default to a walk", "sample from a non-uniform discrete distribution using alias sampling. \"\"\"", "take a smaller breadth of random walks when compared to", "Default is 1.0 dimensions : int Dimensionality of the word", "your resulting embedding. If a low degree node has the", "10, walk_length: int = 80, return_hyperparameter: float = 1.0, inout_hyperparameter:", "= self._get_alias_edge(edge[1], edge[0]) logging.info(f\"Completed preprocessing of transition probabilities for edges\")", "we insist that the caller create an appropriate Graph or", "the calling user properly handles the multi-edges (i.e. aggregate weights", "length of a walk that should be used. If the", "int, interpolate_walk_lengths_by_node_degree: bool, ): if not isinstance(graph, nx.Graph): raise TypeError(\"graph", "nodes. If the low degree nodes have the # same", "edges\") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])", "length to 1. Otherwise, if the degree is greater than", "8. iterations : int Number of epochs in stochastic gradient", "used. If the degree is less than the first element", "https://radimrehurek.com/gensim/models/word2vec.html model = Word2Vec( walks, size=dimensions, window=window_size, min_count=0, sg=1, #", "to 1. num_walks : int Number of walks per source.", "sampled_probabilities, alias def _alias_draw( probabilities: List[float], alias: List[float], random_state: np.random.RandomState", "False, ): \"\"\" Repeatedly simulate random walks from each node.", "self, walk_length: int, start_node: Any, degree_percentiles: Optional[np.ndarray], ): \"\"\" Simulate", "for reproducible results. 
Default is None and will produce a", "top 10% to a # maximum walk length, and interpolate", "to be treated for the \" \"purposes of this embedding\"", "= graph_with_new_ids self.label_map_to_string = new_id_map self.is_directed = self.graph.is_directed() self.p =", "remap_node_ids def node2vec_embed( graph: Union[nx.Graph, nx.DiGraph], num_walks: int = 10,", "as np from ..utils import remap_node_ids def node2vec_embed( graph: Union[nx.Graph,", "larger.append(i) while len(smaller) > 0 and len(larger) > 0: small", "alias_nodes self.alias_edges = alias_edges return def _alias_setup(probabilities: List[float]): \"\"\" Compute", "in this functionality can be found at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ----------", "used if new_walk_length < 1: new_walk_length = 1 return math.floor(new_walk_length)", "preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes and", "10) logging.info( f\"Beginning preprocessing of transition probabilities for {total_nodes} vertices\"", "and degree < percentile: return 1 # otherwise, find which", "the graph is unweighted, the weight of each edge will", "iterations, random_seed ) end = time.time() logging.info( f\"Completed. Ending time", "inout_hyperparameter) _assert_is_positive_int(\"dimensions\", dimensions) _assert_is_positive_int(\"window_size\", window_size) _assert_is_positive_int(\"workers\", workers) _assert_is_positive_int(\"iterations\", iterations) if", "inout_hyperparameter: float, dimensions: int, window_size: int, workers: int, iterations: int,", "should be used. If the degree is less than the", "using alias sampling. \"\"\" number_of_outcomes = len(probabilities) random_index = int(np.floor(random_state.rand()", "the walk length to 1. Otherwise, if the degree is", "random_seed: Optional[int] = None, ) -> Tuple[np.array, List[Any]]: \"\"\" Generates", "quotient: bucket += 1 logging.info(f\"Completed {current_node} / {total_nodes} vertices\") unnormalized_probs", "< percentile: return 1 # otherwise, find which bucket we", "transition probabilities for vertices\" ) alias_edges = {} total_edges =", "= np.random.RandomState(seed=random_seed) node2vec_graph = _Node2VecGraph( graph, return_hyperparameter, inout_hyperparameter, random_state )", "ValueError(f\"{name} must be >= 0.0\") def _preconditions( graph: Union[nx.Graph, nx.DiGraph],", "1 return math.floor(new_walk_length) def _simulate_walks( self, num_walks: int, walk_length: int,", "graph[node][nbr].get(\"weight\", weight_default) for nbr in sorted(graph.neighbors(node)) ] norm_const = sum(unnormalized_probs)", "random_state : np.random.RandomState Random State for reproducible results. Default is", "for details \"\"\" number_of_outcomes = len(probabilities) alias = np.zeros(number_of_outcomes) sampled_probabilities", "inout_hyperparameter: float = 1.0, dimensions: int = 128, window_size: int", "on graph at time {str(time.time())}\") walks = node2vec_graph._simulate_walks( num_walks, walk_length,", "nx.Graph): raise TypeError(\"graph must be a networkx Graph or DiGraph\")", "[] for i, prob in enumerate(probabilities): alias[i] = number_of_outcomes *", "vector are positionally correlated. 
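# The second-order bias that _Node2VecGraph._get_alias_edge (below) precomputes for each edge
# (source, destination): from `destination`, a neighbour is weighted by 1/p if it steps back to
# `source`, by its edge weight if it is also a neighbour of `source`, and by 1/q otherwise.
# The helper below only illustrates that rule for an unweighted graph and is not used by the
# module itself; its name and the normalisation step are illustrative assumptions.
def _illustrate_second_order_bias(graph, source, destination, p, q):
    weights = {}
    for neighbor in sorted(graph.neighbors(destination)):
        if neighbor == source:
            weights[neighbor] = 1.0 / p   # return to the node the walk just came from
        elif graph.has_edge(neighbor, source):
            weights[neighbor] = 1.0       # stay close to the previous node (distance 1)
        else:
            weights[neighbor] = 1.0 / q   # move outward, away from the previous node
    total = sum(weights.values())
    return {node: weight / total for node, weight in weights.items()}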
Notes ----- The original reference implementation", "in unnormalized_probs] return _alias_setup(normalized_probs) def _preprocess_transition_probabilities(self, weight_default: float = 1.0):", "/ {total_edges} edges\") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) else: for edge", "= len(probabilities) random_index = int(np.floor(random_state.rand() * number_of_outcomes)) if random_state.rand() <", "Discovery and Data Mining, 2016. \"\"\" _preconditions( graph, num_walks, walk_length,", "nodes will take a smaller breadth of paths # (due", "[list(map(str, walk)) for walk in walks] # Documentation - https://radimrehurek.com/gensim/models/word2vec.html", "single worker thread (`workers=1`), to eliminate ordering jitter from OS", "This will reduce lower degree nodes from biasing your resulting", "window_size: int, workers: int, iterations: int, random_seed: Optional[int], ): \"\"\"", "return ( np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]), labels, ) def", "interpolate between 1 and ``walk_length``. This will reduce lower degree", "list, max_walk_length: int ): \"\"\" Given a node's degree, determine", "= [] larger = [] for i, prob in enumerate(probabilities):", "your random walks be dominated by low degree nodes. If", "edge[1]) else: for edge in graph.edges(): current_edge += 1 if", "Maximum distance between the current and predicted word within a", "if degree <= percentile: new_walk_length = max_walk_length * ((i *", "has the same number of walks as a high degree", "objective using SGD. \"\"\" from gensim.models import Word2Vec walks =", "Inout hyperparameter random_state : np.random.RandomState Random State for reproducible results.", "be used for reproducible results. Default is None and will", "is None and will produce a random output. Note that", "len(walk) < walk_length: current = walk[-1] current_neighbors = sorted(graph.neighbors(current)) if", "must also limit to a single worker thread (`workers=1`), to", "sampling. \"\"\" number_of_outcomes = len(probabilities) random_index = int(np.floor(random_state.rand() * number_of_outcomes))", "calling user properly handles the multi-edges (i.e. aggregate weights or", "Tuple[np.array, List[Any]]: \"\"\" Generates a node2vec embedding from a given", "degree nodes will take a smaller breadth of random walks", "and will produce a random output. Note that for a", "the high degree nodes, the low degree nodes will take", "by low degree nodes. If the low degree nodes have", "return def _alias_setup(probabilities: List[float]): \"\"\" Compute utility lists for non-uniform", "in enumerate(probabilities): alias[i] = number_of_outcomes * prob if alias[i] <", "num_walks: int, walk_length: int, return_hyperparameter: float, inout_hyperparameter: float, dimensions: int,", "_assert_is_nonnegative_float(\"inout_hyperparameter\", inout_hyperparameter) _assert_is_positive_int(\"dimensions\", dimensions) _assert_is_positive_int(\"window_size\", window_size) _assert_is_positive_int(\"workers\", workers) _assert_is_positive_int(\"iterations\", iterations)", "``walk_length``. 
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.

import logging
import math
import time
from typing import Any, List, Optional, Tuple, Union

import networkx as nx
import numpy as np

from ..utils import remap_node_ids


def node2vec_embed(
    graph: Union[nx.Graph, nx.DiGraph],
    num_walks: int = 10,
    walk_length: int = 80,
    return_hyperparameter: float = 1.0,
    inout_hyperparameter: float = 1.0,
    dimensions: int = 128,
    window_size: int = 10,
    workers: int = 8,
    iterations: int = 1,
    interpolate_walk_lengths_by_node_degree: bool = True,
    random_seed: Optional[int] = None,
) -> Tuple[np.array, List[Any]]:
    """
    Generates a node2vec embedding from a given graph. Will follow the word2vec
    algorithm to create the embedding.

    Parameters
    ----------
    graph: Union[nx.Graph, nx.DiGraph]
        A networkx graph or digraph. A multigraph should be turned into a
        non-multigraph so that the calling user properly handles the multi-edges
        (i.e. aggregate weights or take last edge weight). If the graph is
        unweighted, the weight of each edge will default to 1.
    num_walks : int
        Number of walks per source. Default is 10.
    walk_length: int
        Length of walk per source. Default is 80.
    return_hyperparameter : float
        Return hyperparameter (p). Default is 1.0
    inout_hyperparameter : float
        Inout hyperparameter (q). Default is 1.0
    dimensions : int
        Dimensionality of the word vectors. Default is 128.
    window_size : int
        Maximum distance between the current and predicted word within a sentence.
        Default is 10.
    workers : int
        Use these many worker threads to train the model. Default is 8.
    iterations : int
        Number of epochs in stochastic gradient descent (SGD)
    interpolate_walk_lengths_by_node_degree : bool
        Use a dynamic walk length that corresponds to each node's degree. If the
        node is in the bottom 20 percentile, default to a walk length of 1. If it
        is in the top 10 percentile, use ``walk_length``. If it is in the 20-80
        percentiles, linearly interpolate between 1 and ``walk_length``.

        This will reduce lower degree nodes from biasing your resulting embedding.
        If a low degree node has the same number of walks as a high degree node
        (which it will if this setting is not on), then the lower degree nodes
        will take a smaller breadth of random walks when compared to the high
        degree nodes. This will result in your lower degree walks dominating your
        higher degree nodes.
    random_seed : int
        Seed to be used for reproducible results. Default is None and will produce
        a random output. Note that for a fully deterministically-reproducible run,
        you must also limit to a single worker thread (`workers=1`), to eliminate
        ordering jitter from OS thread scheduling. In addition the environment
        variable ``PYTHONHASHSEED`` must be set to control hash randomization.

    Returns
    -------
    Tuple[np.array, List[Any]]
        A tuple containing a matrix, with each row index corresponding to the
        embedding for each node. The tuple also contains a vector containing the
        corresponding vertex labels for each row in the matrix. The matrix and
        vector are positionally correlated.

    Notes
    -----
    The original reference implementation of node2vec comes from Aditya Grover
    from https://github.com/aditya-grover/node2vec/.

    Further details on the Alias Method used in this functionality can be found at
    https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/

    References
    ----------
    .. [1] Aditya Grover and Jure Leskovec. "node2vec: Scalable Feature Learning
        for Networks." Knowledge Discovery and Data Mining, 2016.
    """
    _preconditions(
        graph,
        num_walks,
        walk_length,
        return_hyperparameter,
        inout_hyperparameter,
        dimensions,
        window_size,
        workers,
        iterations,
        interpolate_walk_lengths_by_node_degree,
    )

    random_state = np.random.RandomState(seed=random_seed)

    node2vec_graph = _Node2VecGraph(
        graph, return_hyperparameter, inout_hyperparameter, random_state
    )

    logging.info(
        f"Starting preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes and "
        f"{str(len(graph.edges()))} edges"
    )

    start = time.time()
    logging.info(f"Starting at time {str(start)}")

    node2vec_graph._preprocess_transition_probabilities()

    logging.info(f"Simulating walks on graph at time {str(time.time())}")
    walks = node2vec_graph._simulate_walks(
        num_walks, walk_length, interpolate_walk_lengths_by_node_degree
    )

    logging.info(f"Learning embeddings at time {str(time.time())}")
    model = _learn_embeddings(
        walks, dimensions, window_size, workers, iterations, random_seed
    )

    end = time.time()
    logging.info(
        f"Completed. Ending time is {str(end)} Elapsed time is {str(end - start)}"
    )

    labels = node2vec_graph.original_graph.nodes()
    remapped_labels = node2vec_graph.label_map_to_string

    return (
        np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]),
        labels,
    )
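For reference, a minimal usage sketch of node2vec_embed as reconstructed above. It assumes the function defined above is in scope (or importable from wherever this module is packaged), that networkx and gensim are installed, and it uses the karate-club graph purely as illustrative input; the parameter values simply echo the documented defaults.

import networkx as nx

# assuming node2vec_embed (defined above) is importable from this module's package
graph = nx.karate_club_graph()  # small illustrative graph

embedding, labels = node2vec_embed(
    graph,
    num_walks=10,
    walk_length=80,
    dimensions=128,
    workers=1,         # single worker (plus PYTHONHASHSEED) for a fully reproducible run, per the docstring
    random_seed=1234,
)

print(embedding.shape)   # (number of nodes, dimensions)
print(list(labels)[:5])  # vertex labels, positionally aligned with the rows of `embedding`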
def _assert_is_positive_int(name: str, value: int):
    if not isinstance(value, int):
        raise TypeError(f"{name} must be an int")
    if value <= 0:
        raise ValueError(f"{name} must be > 0")


def _assert_is_nonnegative_float(name: str, value: float):
    if not isinstance(value, float):
        raise TypeError(f"{name} must be a float")
    if value < 0.0:
        raise ValueError(f"{name} must be >= 0.0")


def _preconditions(
    graph: Union[nx.Graph, nx.DiGraph],
    num_walks: int,
    walk_length: int,
    return_hyperparameter: float,
    inout_hyperparameter: float,
    dimensions: int,
    window_size: int,
    workers: int,
    iterations: int,
    interpolate_walk_lengths_by_node_degree: bool,
):
    if not isinstance(graph, nx.Graph):
        raise TypeError("graph must be a networkx Graph or DiGraph")
    if graph.is_multigraph():
        raise ValueError(
            "This function does not work on multigraphs - because there are two reasonable ways to treat a "
            "multigraph with different behaviors, we insist that the caller create an appropriate Graph or "
            "DiGraph that represents the manner in which they'd like the multigraph to be treated for the "
            "purposes of this embedding"
        )
    _assert_is_positive_int("num_walks", num_walks)
    _assert_is_positive_int("walk_length", walk_length)
    _assert_is_nonnegative_float("return_hyperparameter", return_hyperparameter)
    _assert_is_nonnegative_float("inout_hyperparameter", inout_hyperparameter)
    _assert_is_positive_int("dimensions", dimensions)
    _assert_is_positive_int("window_size", window_size)
    _assert_is_positive_int("workers", workers)
    _assert_is_positive_int("iterations", iterations)
    if not isinstance(interpolate_walk_lengths_by_node_degree, bool):
        raise TypeError("interpolate_walk_lengths_by_node_degree must be a bool")


def _learn_embeddings(
    walks: List[Any],
    dimensions: int,
    window_size: int,
    workers: int,
    iterations: int,
    random_seed: Optional[int],
):
    """
    Learn embeddings by optimizing the skip-gram objective using SGD.
    """
    from gensim.models import Word2Vec

    walks = [list(map(str, walk)) for walk in walks]

    # Documentation - https://radimrehurek.com/gensim/models/word2vec.html
    model = Word2Vec(
        walks,
        size=dimensions,
        window=window_size,
        min_count=0,
        sg=1,  # Training algorithm: 1 for skip-gram; otherwise CBOW
        workers=workers,
        iter=iterations,
        seed=random_seed,
    )

    return model
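One portability note: _learn_embeddings above passes the gensim 3.x keyword names ``size`` and ``iter``. If the installed gensim is 4.x, those keywords were renamed; a minimal sketch of the equivalent 4.x call, assuming the same ``walks``, ``dimensions``, ``window_size``, ``workers``, ``iterations``, and ``random_seed`` values built above, is:

from gensim.models import Word2Vec

# gensim >= 4.0 renamed `size` -> `vector_size` and `iter` -> `epochs`;
# all other keyword arguments keep the meanings used in _learn_embeddings above.
model = Word2Vec(
    walks,
    vector_size=dimensions,
    window=window_size,
    min_count=0,
    sg=1,  # Training algorithm: 1 for skip-gram; otherwise CBOW
    workers=workers,
    epochs=iterations,
    seed=random_seed,
)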
class _Node2VecGraph:
    """
    Temporary inner state object for constructing the random walks

    Parameters
    ----------
    graph: nx.Graph
        A networkx graph
    return_hyperparameter : float
        Return hyperparameter
    inout_hyperparameter : float
        Inout hyperparameter
    random_state : np.random.RandomState
        Random State for reproducible results. Default is None and will produce
        random results
    """

    def __init__(
        self,
        graph: nx.Graph,
        return_hyperparameter: float,
        inout_hyperparameter: float,
        random_state: Optional[np.random.RandomState] = None,
    ):
        self.original_graph: nx.Graph = graph

        graph_with_new_ids, new_id_map = remap_node_ids(graph=graph)
        self.graph = graph_with_new_ids
        self.label_map_to_string = new_id_map

        self.is_directed = self.graph.is_directed()
        self.p = return_hyperparameter
        self.q = inout_hyperparameter
        self.random_state = random_state

    def node2vec_walk(
        self,
        walk_length: int,
        start_node: Any,
        degree_percentiles: Optional[np.ndarray],
    ):
        """
        Simulate a random walk starting from start node.
        """
        graph = self.graph
        alias_nodes = self.alias_nodes
        alias_edges = self.alias_edges
        walk = [start_node]

        # Percentiles will be provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature.
        # the intent of the code is to default the bottom 20% of to a minimal walk length, default the top 10% to a
        # maximum walk length, and interpolate the inner 70% linearly from min to max.
        #
        # This is to avoid having your random walks be dominated by the low degree nodes. If the low degree nodes have the
        # same number of walks as the high degree nodes, the low degree nodes will take a smaller breadth of paths
        # (due to their being less nodes to choose from) and will bias your resulting Word2Vec embedding
        if degree_percentiles is not None:
            degree = nx.degree(graph, start_node)
            walk_length = self._get_walk_length_interpolated(
                degree, degree_percentiles, walk_length
            )

        while len(walk) < walk_length:
            current = walk[-1]
            current_neighbors = sorted(graph.neighbors(current))

            if len(current_neighbors) > 0:
                if len(walk) == 1:
                    walk.append(
                        current_neighbors[
                            _alias_draw(
                                alias_nodes[current][0],
                                alias_nodes[current][1],
                                self.random_state,
                            )
                        ]
                    )
                else:
                    prev = walk[-2]
                    next = current_neighbors[
                        _alias_draw(
                            alias_edges[(prev, current)][0],
                            alias_edges[(prev, current)][1],
                            self.random_state,
                        )
                    ]
                    walk.append(next)
            else:
                break

        return walk

    @staticmethod
    def _get_walk_length_interpolated(
        degree: int, percentiles: list, max_walk_length: int
    ):
        """
        Given a node's degree, determine the length of a walk that should be used.
        If the degree is less than the first element of the percentiles list,
        default the walk length to 1. Otherwise, if the degree is greater than the
        last element of the list, default it to the max_walk_length. If it falls
        in the middle, do a linear interpolation to decide the length of the walk.
        """
        new_walk_length = None

        for i, percentile in enumerate(percentiles):
            # if we are below the first percentile in the list, default to a walk length of 1
            if i == 0 and degree < percentile:
                return 1

            # otherwise, find which bucket we are going to be in.
            if degree <= percentile:
                new_walk_length = max_walk_length * ((i * 0.1) + 0.2)
                break

        # the degree is above the last percentile
        if not new_walk_length:
            new_walk_length = max_walk_length

        # a walk length of 0 is invalid but can happen depending on the percentiles used
        if new_walk_length < 1:
            new_walk_length = 1

        return math.floor(new_walk_length)
    def _simulate_walks(
        self,
        num_walks: int,
        walk_length: int,
        interpolate_walk_lengths_by_node_degree: bool = False,
    ):
        """
        Repeatedly simulate random walks from each node.
        """
        graph = self.graph
        walks = []
        nodes = list(graph.nodes())

        degree_percentiles: Optional[np.ndarray] = None
        if interpolate_walk_lengths_by_node_degree:
            degree_percentiles = np.percentile(
                [degree for _, degree in graph.degree()],
                [x for x in range(20, 90, 10)],
            )

        for walk_iteration in range(num_walks):
            logging.info(
                "Walk iteration: " + str(walk_iteration + 1) + "/" + str(num_walks)
            )

            self.random_state.shuffle(nodes)
            for node in nodes:
                walks.append(
                    self.node2vec_walk(
                        walk_length=walk_length,
                        start_node=node,
                        degree_percentiles=degree_percentiles,
                    )
                )

        return walks

    def _get_alias_edge(self, source: Any, destination: Any):
        """
        Get the alias edge setup lists for a given edge.
        """
        graph = self.graph
        p = self.p
        q = self.q

        unnormalized_probs = []
        for destination_neighbor in sorted(graph.neighbors(destination)):
            if destination_neighbor == source:
                unnormalized_probs.append(
                    graph[destination][destination_neighbor].get("weight", 1) / p
                )
            elif graph.has_edge(destination_neighbor, source):
                unnormalized_probs.append(
                    graph[destination][destination_neighbor].get("weight", 1)
                )
            else:
                unnormalized_probs.append(
                    graph[destination][destination_neighbor].get("weight", 1) / q
                )
        norm_const = sum(unnormalized_probs)
        normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]

        return _alias_setup(normalized_probs)

    def _preprocess_transition_probabilities(self, weight_default: float = 1.0):
        """
        Preprocessing of transition probabilities for guiding the random walks.
        """
        graph = self.graph
        is_directed = self.is_directed

        alias_nodes = {}
        total_nodes = len(graph.nodes())
        bucket = 0
        current_node = 0
        quotient = int(total_nodes / 10)

        logging.info(
            f"Beginning preprocessing of transition probabilities for {total_nodes} vertices"
        )
        for node in graph.nodes():
            current_node += 1
            if current_node > bucket * quotient:
                bucket += 1
                logging.info(f"Completed {current_node} / {total_nodes} vertices")

            unnormalized_probs = [
                graph[node][nbr].get("weight", weight_default)
                for nbr in sorted(graph.neighbors(node))
            ]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [
                float(u_prob) / norm_const for u_prob in unnormalized_probs
            ]
            alias_nodes[node] = _alias_setup(normalized_probs)

        logging.info(f"Completed preprocessing of transition probabilities for vertices")

        alias_edges = {}
        total_edges = len(graph.edges())
        bucket = 0
        current_edge = 0
        quotient = int(total_edges / 10)

        logging.info(
            f"Beginning preprocessing of transition probabilities for {total_edges} edges"
        )
        if is_directed:
            for edge in graph.edges():
                current_edge += 1
                if current_edge > bucket * quotient:
                    bucket += 1
                    logging.info(f"Completed {current_edge} / {total_edges} edges")

                alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
        else:
            for edge in graph.edges():
                current_edge += 1
                if current_edge > bucket * quotient:
                    bucket += 1
                    logging.info(f"Completed {current_edge} / {total_edges} edges")

                alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
                alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])

        logging.info(f"Completed preprocessing of transition probabilities for edges")

        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges

        return
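To make the effect of the return hyperparameter (p) and in-out hyperparameter (q) concrete, here is a small self-contained sketch that reproduces the unnormalized weighting used by _get_alias_edge for one walk step. The toy graph, the choice of p and q, and the variable names are illustrative assumptions only.

import networkx as nx

# Toy graph: a triangle 0-1-2 plus a pendant edge 2-3, all weights defaulting to 1.
g = nx.Graph()
g.add_edges_from([(0, 1), (1, 2), (0, 2), (2, 3)])

p, q = 1.0, 2.0              # illustrative return / in-out hyperparameters
source, destination = 0, 2   # the walk just moved 0 -> 2

unnormalized = []
for nbr in sorted(g.neighbors(destination)):
    w = g[destination][nbr].get("weight", 1)
    if nbr == source:                # stepping straight back is scaled by 1/p
        unnormalized.append(w / p)
    elif g.has_edge(nbr, source):    # staying near the source keeps weight w
        unnormalized.append(w)
    else:                            # moving further away is scaled by 1/q
        unnormalized.append(w / q)

total = sum(unnormalized)
print([round(x / total, 3) for x in unnormalized])
# neighbors of 2 are [0, 1, 3]; with p=1, q=2 this prints [0.4, 0.4, 0.2]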
def _alias_setup(probabilities: List[float]):
    """
    Compute utility lists for non-uniform sampling from discrete distributions.

    Refer to
    https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details
    """
    number_of_outcomes = len(probabilities)
    alias = np.zeros(number_of_outcomes)
    sampled_probabilities = np.zeros(number_of_outcomes, dtype=int)

    smaller = []
    larger = []
    for i, prob in enumerate(probabilities):
        alias[i] = number_of_outcomes * prob
        if alias[i] < 1.0:
            smaller.append(i)
        else:
            larger.append(i)

    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()

        sampled_probabilities[small] = large
        alias[large] = alias[large] + alias[small] - 1.0
        if alias[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)

    return sampled_probabilities, alias


def _alias_draw(
    probabilities: List[float], alias: List[float], random_state: np.random.RandomState
):
    """
    Draw sample from a non-uniform discrete distribution using alias sampling.
    """
    number_of_outcomes = len(probabilities)
    random_index = int(np.floor(random_state.rand() * number_of_outcomes))

    if random_state.rand() < alias[random_index]:
        return random_index
    else:
        return probabilities[random_index]
Default is 1.0 dimensions : int Dimensionality of", "if we are below the first percentile in the list,", "number_of_outcomes)) if random_state.rand() < alias[random_index]: return random_index else: return probabilities[random_index]", "smaller.append(large) else: larger.append(large) return sampled_probabilities, alias def _alias_draw( probabilities: List[float],", "nx.Graph = graph graph_with_new_ids, new_id_map = remap_node_ids(graph=graph) self.graph = graph_with_new_ids", "time is {str(start - end)}\" ) labels = node2vec_graph.original_graph.nodes() remapped_labels", "to their being less nodes to choose from) and will", "bottom 20% of to a minimal walk length, default the", "walk_length ) while len(walk) < walk_length: current = walk[-1] current_neighbors", "matrix. The matrix and vector are positionally correlated. Notes -----", "from discrete distributions. Refer to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details \"\"\" number_of_outcomes", "jitter from OS thread scheduling. In addition the environment variable", "for guiding the random walks. \"\"\" graph = self.graph is_directed", "= time.time() logging.info( f\"Completed. Ending time is {str(end)} Elapsed time", "alias_edges = {} total_edges = len(graph.edges()) bucket = 0 current_edge", "matrix and vector are positionally correlated. Notes ----- The original", "edges\" ) start = time.time() logging.info(f\"Starting at time {str(start)}\") node2vec_graph._preprocess_transition_probabilities()", "time.time() logging.info(f\"Starting at time {str(start)}\") node2vec_graph._preprocess_transition_probabilities() logging.info(f\"Simulating walks on graph", "degree: int, percentiles: list, max_walk_length: int ): \"\"\" Given a", "embedding if degree_percentiles is not None: degree = nx.degree(graph, start_node)", "a given edge. \"\"\" graph = self.graph p = self.p", "_preprocess_transition_probabilities(self, weight_default: float = 1.0): \"\"\" Preprocessing of transition probabilities", ": int Number of walks per source. Default is 10.", "does not work on multigraphs - because there are two", "each nodes degree. If the node is in the bottom", "self.random_state, ) ] ) else: prev = walk[-2] next =", "degree. If the node is in the bottom 20 percentile,", "alias_edges[(prev, current)][0], alias_edges[(prev, current)][1], self.random_state, ) ] walk.append(next) else: break", "= large alias[large] = alias[large] + alias[small] - 1.0 if", "transition probabilities for {total_edges} edges\" ) if is_directed: for edge", "of to a minimal walk length, default the top 10%", "default the top 10% to a # maximum walk length,", "is invalid but can happen depending on the percentiles used", "it to the max_walk_length. If it falls in the middle,", "source. Default is 80. return_hyperparameter : float Return hyperparameter (p).", "utility lists for non-uniform sampling from discrete distributions. Refer to", "+ 1) + \"/\" + str(num_walks) ) self.random_state.shuffle(nodes) for node", "_assert_is_positive_int(\"iterations\", iterations) if not isinstance(interpolate_walk_lengths_by_node_degree, bool): raise TypeError(\"interpolate_walk_lengths_by_node_degree must be", "sampling from discrete distributions. Refer to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details \"\"\"", "Graph or \" \"DiGraph that represents the manner in which", "Grover from https://github.com/aditya-grover/node2vec/. 
Further details on the Alias Method used", "else: larger.append(large) return sampled_probabilities, alias def _alias_draw( probabilities: List[float], alias:", "be > 0\") def _assert_is_nonnegative_float(name: str, value: float): if not", "Data Mining, 2016. \"\"\" _preconditions( graph, num_walks, walk_length, return_hyperparameter, inout_hyperparameter,", "List[float]): \"\"\" Compute utility lists for non-uniform sampling from discrete", "this embedding\" ) _assert_is_positive_int(\"num_walks\", num_walks) _assert_is_positive_int(\"walk_length\", walk_length) _assert_is_nonnegative_float(\"return_hyperparameter\", return_hyperparameter) _assert_is_nonnegative_float(\"inout_hyperparameter\",", "the lower degree nodes will take a smaller breadth of", "1 # otherwise, find which bucket we are going to", "Optional, Tuple, Union import networkx as nx import numpy as", "= self.alias_edges walk = [start_node] # Percentiles will be provided", "'interpolate_walk_lengths_by_node_degree' feature. # the intent of the code is to", "_assert_is_positive_int(\"num_walks\", num_walks) _assert_is_positive_int(\"walk_length\", walk_length) _assert_is_nonnegative_float(\"return_hyperparameter\", return_hyperparameter) _assert_is_nonnegative_float(\"inout_hyperparameter\", inout_hyperparameter) _assert_is_positive_int(\"dimensions\", dimensions)", "having your random walks be dominated by low degree nodes.", "(which it will if this setting is not on), then", "a \" \"multigraph with different behaviors, we insist that the", "from https://github.com/aditya-grover/node2vec/. Further details on the Alias Method used in", "List[Any]]: \"\"\" Generates a node2vec embedding from a given graph.", "in enumerate(percentiles): # if we are below the first percentile", "Notes ----- The original reference implementation of node2vec comes from", "value < 0.0: raise ValueError(f\"{name} must be >= 0.0\") def", "+ str(num_walks) ) self.random_state.shuffle(nodes) for node in nodes: walks.append( self.node2vec_walk(", "unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) / q ) norm_const = sum(unnormalized_probs) normalized_probs", "8, iterations: int = 1, interpolate_walk_lengths_by_node_degree: bool = True, random_seed:", "edge. \"\"\" graph = self.graph p = self.p q =", "If a low degree node has the same number of", "= {} total_edges = len(graph.edges()) bucket = 0 current_edge =", "= 0 current_edge = 0 quotient = int(total_edges / 10)", "float\") if value < 0.0: raise ValueError(f\"{name} must be >=", "len(smaller) > 0 and len(larger) > 0: small = smaller.pop()", "interpolate the inner 70% linearly from min to max. #", "number of walks as the high degree nodes, the low", "int, window_size: int, workers: int, iterations: int, random_seed: Optional[int], ):", "list, default it to the max_walk_length. If it falls in", "it will if this setting is not on), then the", "value: int): if not isinstance(value, int): raise TypeError(f\"{name} must be", "skip-gram; otherwise CBOW workers=workers, iter=iterations, seed=random_seed, ) return model class", "guiding the random walks. \"\"\" graph = self.graph is_directed =", "for reproducible results. Default is None and will produce random", "0.2) break # the degree is above the last percentile", "Default is 1.0 inout_hyperparameter : float Inout hyperparameter (q). 
Default", "graph.edges(): current_edge += 1 if current_edge > bucket * quotient:", "size=dimensions, window=window_size, min_count=0, sg=1, # Training algorithm: 1 for skip-gram;", "like the multigraph to be treated for the \" \"purposes", "of transition probabilities for guiding the random walks. \"\"\" graph", "these many worker threads to train the model. Default is", "dimensions, window_size, workers, iterations, interpolate_walk_lengths_by_node_degree, ) random_state = np.random.RandomState(seed=random_seed) node2vec_graph", "low degree nodes. If the low degree nodes have the", "walk length of 1. If it is in the top", ") logging.info(f\"Learning embeddings at time {str(time.time())}\") model = _learn_embeddings( walks,", "(i.e. aggregate weights or take last edge weight). If the", "self._get_alias_edge(edge[0], edge[1]) alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0]) logging.info(f\"Completed preprocessing of", "f\"Starting preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes", "from a given graph. Will follow the word2vec algorithm to", "float): if not isinstance(value, float): raise TypeError(f\"{name} must be a", "float Return hyperparameter inout_hyperparameter : float Inout hyperparameter random_state :", "\"\"\" Preprocessing of transition probabilities for guiding the random walks.", "window_size: int = 10, workers: int = 8, iterations: int", "of walk per source. Default is 80. return_hyperparameter : float", "interpolate_walk_lengths_by_node_degree ) logging.info(f\"Learning embeddings at time {str(time.time())}\") model = _learn_embeddings(", "if len(walk) == 1: walk.append( current_neighbors[ _alias_draw( alias_nodes[current][0], alias_nodes[current][1], self.random_state,", "embedding for each node. The tuple also contains a vector", "will result in your lower degree walks dominating your higher", "\"\"\" Generates a node2vec embedding from a given graph. Will", "a single worker thread (`workers=1`), to eliminate ordering jitter from", "float, inout_hyperparameter: float, random_state: Optional[np.random.RandomState] = None, ): self.original_graph: nx.Graph", "to avoid having your random walks be dominated by low", "nodes and \" f\"{str(len(graph.edges()))} edges\" ) start = time.time() logging.info(f\"Starting", "details on the Alias Method used in this functionality can", "not isinstance(graph, nx.Graph): raise TypeError(\"graph must be a networkx Graph", "if not isinstance(graph, nx.Graph): raise TypeError(\"graph must be a networkx", "nodes from biasing your resulting embedding. If a low degree", "preprocessing of transition probabilities for vertices\" ) alias_edges = {}", "len(larger) > 0: small = smaller.pop() large = larger.pop() sampled_probabilities[small]", "return_hyperparameter, inout_hyperparameter, dimensions, window_size, workers, iterations, interpolate_walk_lengths_by_node_degree, ) random_state =", "Documentation - https://radimrehurek.com/gensim/models/word2vec.html model = Word2Vec( walks, size=dimensions, window=window_size, min_count=0,", "length, default the top 10% to a # maximum walk", "1. If it is in the top 10 percentile, use", "probabilities for {total_nodes} vertices\" ) for node in graph.nodes(): current_node", "from biasing your resulting embedding. 
If a low degree node", "start_node) walk_length = self._get_walk_length_interpolated( degree, degree_percentiles, walk_length ) while len(walk)", "enumerate(probabilities): alias[i] = number_of_outcomes * prob if alias[i] < 1.0:", "current = walk[-1] current_neighbors = sorted(graph.neighbors(current)) if len(current_neighbors) > 0:", "graph, return_hyperparameter, inout_hyperparameter, random_state ) logging.info( f\"Starting preprocessing of transition", "raise TypeError(\"graph must be a networkx Graph or DiGraph\") if", "networkx graph or digraph. A multigraph should be turned into", "_preconditions( graph, num_walks, walk_length, return_hyperparameter, inout_hyperparameter, dimensions, window_size, workers, iterations,", "degree, degree_percentiles, walk_length ) while len(walk) < walk_length: current =", "None if interpolate_walk_lengths_by_node_degree: degree_percentiles = np.percentile( [degree for _, degree", "each edge will default to 1. num_walks : int Number", "follow the word2vec algorithm to create the embedding. Parameters ----------", "of the walk. \"\"\" new_walk_length = None for i, percentile", "max_walk_length * ((i * 0.1) + 0.2) break # the", "the length of a walk that should be used. If", "np.random.RandomState(seed=random_seed) node2vec_graph = _Node2VecGraph( graph, return_hyperparameter, inout_hyperparameter, random_state ) logging.info(", "hyperparameter (p). Default is 1.0 inout_hyperparameter : float Inout hyperparameter", "aggregate weights or take last edge weight). If the graph", "num_walks, walk_length, interpolate_walk_lengths_by_node_degree ) logging.info(f\"Learning embeddings at time {str(time.time())}\") model", "walks. \"\"\" graph = self.graph is_directed = self.is_directed alias_nodes =", ") for node in graph.nodes(): current_node += 1 if current_node", "Scalable Feature Learning for Networks.\" Knowledge Discovery and Data Mining,", "bool\") def _learn_embeddings( walks: List[Any], dimensions: int, window_size: int, workers:", "list(graph.nodes()) degree_percentiles: Optional[np.ndarray] = None if interpolate_walk_lengths_by_node_degree: degree_percentiles = np.percentile(", "window_size, workers, iterations, random_seed ) end = time.time() logging.info( f\"Completed.", "to a walk length of 1. If it is in", "result in your lower degree walks dominating your higher degree", "with {str(len(graph.nodes()))} nodes and \" f\"{str(len(graph.edges()))} edges\" ) start =", ") ] walk.append(next) else: break return walk @staticmethod def _get_walk_length_interpolated(", "in stochastic gradient descent (SGD) interpolate_walk_lengths_by_node_degree : bool Use a", "in graph.edges(): current_edge += 1 if current_edge > bucket *", "p = self.p q = self.q unnormalized_probs = [] for", "reduce lower degree nodes from biasing your resulting embedding. If", "walks be dominated by low degree nodes. If the low", "of walks per source. Default is 10. walk_length: int Length", "Tuple[np.array, List[Any]] A tuple containing a matrix, with each row", "walk_length: int Length of walk per source. Default is 80.", "\"\"\" Simulate a random walk starting from start node. \"\"\"", "for _, degree in graph.degree()], [x for x in range(20,", "degree walks dominating your higher degree nodes. 
random_seed : int", "return_hyperparameter) _assert_is_nonnegative_float(\"inout_hyperparameter\", inout_hyperparameter) _assert_is_positive_int(\"dimensions\", dimensions) _assert_is_positive_int(\"window_size\", window_size) _assert_is_positive_int(\"workers\", workers) _assert_is_positive_int(\"iterations\",", "smaller = [] larger = [] for i, prob in", "node2vec_embed( graph: Union[nx.Graph, nx.DiGraph], num_walks: int = 10, walk_length: int", "if value < 0.0: raise ValueError(f\"{name} must be >= 0.0\")", "logging.info( f\"Beginning preprocessing of transition probabilities for {total_edges} edges\" )", "be turned into a non-multigraph so that the calling user", "Aditya Grover from https://github.com/aditya-grover/node2vec/. Further details on the Alias Method", "so that the calling user properly handles the multi-edges (i.e.", "nodes. This will result in your lower degree walks dominating", "= 1.0, inout_hyperparameter: float = 1.0, dimensions: int = 128,", "_learn_embeddings( walks: List[Any], dimensions: int, window_size: int, workers: int, iterations:", "/ norm_const for u_prob in unnormalized_probs ] alias_nodes[node] = _alias_setup(normalized_probs)", "your lower degree walks dominating your higher degree nodes. random_seed", "self.graph walks = [] nodes = list(graph.nodes()) degree_percentiles: Optional[np.ndarray] =", "* number_of_outcomes)) if random_state.rand() < alias[random_index]: return random_index else: return", "edge[0])] = self._get_alias_edge(edge[1], edge[0]) logging.info(f\"Completed preprocessing of transition probabilities for", "graph: Union[nx.Graph, nx.DiGraph] A networkx graph or digraph. A multigraph", "percentile in the list, default to a walk length of", "Word2Vec embedding if degree_percentiles is not None: degree = nx.degree(graph,", "workers: int = 8, iterations: int = 1, interpolate_walk_lengths_by_node_degree: bool", "object for constructing the random walks Parameters ---------- graph: nx.Graph", "smaller.append(i) else: larger.append(i) while len(smaller) > 0 and len(larger) >", "TypeError(\"interpolate_walk_lengths_by_node_degree must be a bool\") def _learn_embeddings( walks: List[Any], dimensions:", "80. return_hyperparameter : float Return hyperparameter (p). Default is 1.0", "for constructing the random walks Parameters ---------- graph: nx.Graph A", "+ str(walk_iteration + 1) + \"/\" + str(num_walks) ) self.random_state.shuffle(nodes)", "for node in labels]), labels, ) def _assert_is_positive_int(name: str, value:", "# (due to their being less nodes to choose from)", "10, workers: int = 8, iterations: int = 1, interpolate_walk_lengths_by_node_degree:", "q = self.q unnormalized_probs = [] for destination_neighbor in sorted(graph.neighbors(destination)):", "reproducible results. 
Default is None and will produce a random", ") return walks def _get_alias_edge(self, source: Any, destination: Any): \"\"\"", ") while len(walk) < walk_length: current = walk[-1] current_neighbors =", "walks = node2vec_graph._simulate_walks( num_walks, walk_length, interpolate_walk_lengths_by_node_degree ) logging.info(f\"Learning embeddings at", "invalid but can happen depending on the percentiles used if", "int(np.floor(random_state.rand() * number_of_outcomes)) if random_state.rand() < alias[random_index]: return random_index else:", "= number_of_outcomes * prob if alias[i] < 1.0: smaller.append(i) else:", "first element of the percentiles list, default the walk length", "weight_default: float = 1.0): \"\"\" Preprocessing of transition probabilities for", "networkx Graph or DiGraph\") if graph.is_multigraph(): raise ValueError( \"This function", "eliminate ordering jitter from OS thread scheduling. In addition the", "bucket = 0 current_edge = 0 quotient = int(total_edges /", "model = Word2Vec( walks, size=dimensions, window=window_size, min_count=0, sg=1, # Training", "* ((i * 0.1) + 0.2) break # the degree", "bias your resulting Word2Vec embedding if degree_percentiles is not None:", "``walk_length``. If it is in the 20-80 percentiles, linearly interpolate", "class _Node2VecGraph: \"\"\" Temporary inner state object for constructing the", "but can happen depending on the percentiles used if new_walk_length", "weight of each edge will default to 1. num_walks :", "graph: Union[nx.Graph, nx.DiGraph], num_walks: int, walk_length: int, return_hyperparameter: float, inout_hyperparameter:", "nodes, the low degree nodes will take a smaller breadth", "List[float], alias: List[float], random_state: np.random.RandomState ): \"\"\" Draw sample from", "source: Any, destination: Any): \"\"\" Get the alias edge setup", "to the embedding for each node. The tuple also contains", "<NAME> \"node2vec: Scalable Feature Learning for Networks.\" Knowledge Discovery and", "edge[0]) logging.info(f\"Completed preprocessing of transition probabilities for edges\") self.alias_nodes =", "= current_neighbors[ _alias_draw( alias_edges[(prev, current)][0], alias_edges[(prev, current)][1], self.random_state, ) ]", "int ): \"\"\" Given a node's degree, determine the length", "Ending time is {str(end)} Elapsed time is {str(start - end)}\"", "current_node > bucket * quotient: bucket += 1 logging.info(f\"Completed {current_node}", "dominating your higher degree nodes. random_seed : int Seed to", "first percentile in the list, default to a walk length", "normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs] return", "probabilities: List[float], alias: List[float], random_state: np.random.RandomState ): \"\"\" Draw sample", "+= 1 if current_edge > bucket * quotient: bucket +=", "Union import networkx as nx import numpy as np from", "return_hyperparameter, inout_hyperparameter, random_state ) logging.info( f\"Starting preprocessing of transition probabilities", "raise ValueError( \"This function does not work on multigraphs -", "workers=workers, iter=iterations, seed=random_seed, ) return model class _Node2VecGraph: \"\"\" Temporary", "in the 20-80 percentiles, linearly interpolate between 1 and ``walk_length``.", "degree nodes. 
random_seed : int Seed to be used for", "= [list(map(str, walk)) for walk in walks] # Documentation -", "as a high degree node (which it will if this", "from gensim.models import Word2Vec walks = [list(map(str, walk)) for walk", "\"\"\" Temporary inner state object for constructing the random walks", "= 1.0, dimensions: int = 128, window_size: int = 10,", "Default is None and will produce random results \"\"\" def", "70% linearly from min to max. # This is to", "sampled_probabilities[small] = large alias[large] = alias[large] + alias[small] - 1.0", "when compared to the high degree nodes. This will result", "= self.p q = self.q unnormalized_probs = [] for destination_neighbor", "a node's degree, determine the length of a walk that", "= 10, workers: int = 8, iterations: int = 1,", "_simulate_walks( self, num_walks: int, walk_length: int, interpolate_walk_lengths_by_node_degree: bool = False,", "high degree node (which it will if this setting is", "the MIT License. import logging import math import time from", "< 1.0: smaller.append(large) else: larger.append(large) return sampled_probabilities, alias def _alias_draw(", "will be provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature.", ".. [1] <NAME> and <NAME> \"node2vec: Scalable Feature Learning for", "((i * 0.1) + 0.2) break # the degree is", "epochs in stochastic gradient descent (SGD) interpolate_walk_lengths_by_node_degree : bool Use", "a random walk starting from start node. \"\"\" graph =", "+= 1 logging.info(f\"Completed {current_node} / {total_nodes} vertices\") unnormalized_probs = [", "+= 1 logging.info(f\"Completed {current_edge} / {total_edges} edges\") alias_edges[edge] = self._get_alias_edge(edge[0],", "edge will default to 1. num_walks : int Number of", "alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) else: for edge in graph.edges(): current_edge", "_assert_is_positive_int(name: str, value: int): if not isinstance(value, int): raise TypeError(f\"{name}", "isinstance(interpolate_walk_lengths_by_node_degree, bool): raise TypeError(\"interpolate_walk_lengths_by_node_degree must be a bool\") def _learn_embeddings(", "current)][1], self.random_state, ) ] walk.append(next) else: break return walk @staticmethod", "of transition probabilities for edges\") self.alias_nodes = alias_nodes self.alias_edges =", "float, random_state: Optional[np.random.RandomState] = None, ): self.original_graph: nx.Graph = graph", "and ``walk_length``. This will reduce lower degree nodes from biasing", "TypeError(f\"{name} must be a float\") if value < 0.0: raise", "if value <= 0: raise ValueError(f\"{name} must be > 0\")", "preprocessing of transition probabilities for {total_nodes} vertices\" ) for node", "and <NAME> \"node2vec: Scalable Feature Learning for Networks.\" Knowledge Discovery", "self.label_map_to_string = new_id_map self.is_directed = self.graph.is_directed() self.p = return_hyperparameter self.q", "new_walk_length: new_walk_length = max_walk_length # a walk length of 0", "* 0.1) + 0.2) break # the degree is above", "<gh_stars>0 # Copyright (c) Microsoft Corporation and contributors. # Licensed", "logging.info( \"Walk iteration: \" + str(walk_iteration + 1) + \"/\"", "else: for edge in graph.edges(): current_edge += 1 if current_edge", "if alias[large] < 1.0: smaller.append(large) else: larger.append(large) return sampled_probabilities, alias", "Number of walks per source. Default is 10. walk_length: int", "a non-uniform discrete distribution using alias sampling. 
\"\"\" number_of_outcomes =", "a linear interpolation to decide the length of the walk.", "percentile if not new_walk_length: new_walk_length = max_walk_length # a walk", "algorithm to create the embedding. Parameters ---------- graph: Union[nx.Graph, nx.DiGraph]", ": int Maximum distance between the current and predicted word", "is above the last percentile if not new_walk_length: new_walk_length =", "of the percentiles list, default the walk length to 1.", "or digraph. A multigraph should be turned into a non-multigraph", "the current and predicted word within a sentence. Default is", "np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]), labels, ) def _assert_is_positive_int(name: str,", "for u_prob in unnormalized_probs ] alias_nodes[node] = _alias_setup(normalized_probs) logging.info( f\"Completed", "each node. \"\"\" graph = self.graph walks = [] nodes", "if not isinstance(value, float): raise TypeError(f\"{name} must be a float\")", "the last element of the list, default it to the", "alias[large] + alias[small] - 1.0 if alias[large] < 1.0: smaller.append(large)", "degree nodes will take a smaller breadth of paths #", "higher degree nodes. random_seed : int Seed to be used", "take a smaller breadth of paths # (due to their", "A multigraph should be turned into a non-multigraph so that", "hyperparameter random_state : np.random.RandomState Random State for reproducible results. Default", "low degree nodes have the # same number of walks", "percentiles list, default the walk length to 1. Otherwise, if", "num_walks) _assert_is_positive_int(\"walk_length\", walk_length) _assert_is_nonnegative_float(\"return_hyperparameter\", return_hyperparameter) _assert_is_nonnegative_float(\"inout_hyperparameter\", inout_hyperparameter) _assert_is_positive_int(\"dimensions\", dimensions) _assert_is_positive_int(\"window_size\",", "bucket = 0 current_node = 0 quotient = int(total_nodes /", "labels for each row in the matrix. The matrix and", "node in nodes: walks.append( self.node2vec_walk( walk_length=walk_length, start_node=node, degree_percentiles=degree_percentiles, ) )", "percentile: return 1 # otherwise, find which bucket we are", "iterations: int, random_seed: Optional[int], ): \"\"\" Learn embeddings by optimizing", "f\"{str(len(graph.edges()))} edges\" ) start = time.time() logging.info(f\"Starting at time {str(start)}\")", "TypeError(f\"{name} must be an int\") if value <= 0: raise", "\" + str(walk_iteration + 1) + \"/\" + str(num_walks) )", "to treat a \" \"multigraph with different behaviors, we insist", "be >= 0.0\") def _preconditions( graph: Union[nx.Graph, nx.DiGraph], num_walks: int,", "networkx as nx import numpy as np from ..utils import", "vertex labels for each row in the matrix. The matrix", "in nodes: walks.append( self.node2vec_walk( walk_length=walk_length, start_node=node, degree_percentiles=degree_percentiles, ) ) return", "graph = self.graph p = self.p q = self.q unnormalized_probs", "source): unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) ) else: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) /", ") for walk_iteration in range(num_walks): logging.info( \"Walk iteration: \" +", "will take a smaller breadth of random walks when compared", "breadth of random walks when compared to the high degree", "distributions. 
Refer to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details \"\"\" number_of_outcomes = len(probabilities)", "are going to be in. if degree <= percentile: new_walk_length", "Corporation and contributors. # Licensed under the MIT License. import", "bottom 20 percentile, default to a walk length of 1.", "Optional[np.random.RandomState] = None, ): self.original_graph: nx.Graph = graph graph_with_new_ids, new_id_map", "] walk.append(next) else: break return walk @staticmethod def _get_walk_length_interpolated( degree:", "the degree is above the last percentile if not new_walk_length:", "setup lists for a given edge. \"\"\" graph = self.graph", "index corresponding to the embedding for each node. The tuple", "= _learn_embeddings( walks, dimensions, window_size, workers, iterations, random_seed ) end", "isinstance(graph, nx.Graph): raise TypeError(\"graph must be a networkx Graph or", "Get the alias edge setup lists for a given edge.", "Mining, 2016. \"\"\" _preconditions( graph, num_walks, walk_length, return_hyperparameter, inout_hyperparameter, dimensions,", "1) / p ) elif graph.has_edge(destination_neighbor, source): unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1)", "if this setting is not on), then the lower degree", "= self.graph p = self.p q = self.q unnormalized_probs =", ") else: prev = walk[-2] next = current_neighbors[ _alias_draw( alias_edges[(prev,", "to a minimal walk length, default the top 10% to", "lower degree nodes will take a smaller breadth of random", "1) ) else: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) / q ) norm_const", "the low degree nodes will take a smaller breadth of", "unnormalized_probs = [] for destination_neighbor in sorted(graph.neighbors(destination)): if destination_neighbor ==", "that represents the manner in which they'd like the multigraph", "a random output. Note that for a fully deterministically-reproducible run,", "\"\"\" Get the alias edge setup lists for a given", "sum(unnormalized_probs) normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]", "will reduce lower degree nodes from biasing your resulting embedding.", "Optional[int], ): \"\"\" Learn embeddings by optimizing the skip-gram objective", "] alias_nodes[node] = _alias_setup(normalized_probs) logging.info( f\"Completed preprocessing of transition probabilities", "walk_length: current = walk[-1] current_neighbors = sorted(graph.neighbors(current)) if len(current_neighbors) >", "If the graph is unweighted, the weight of each edge", "in labels]), labels, ) def _assert_is_positive_int(name: str, value: int): if", "is 8. iterations : int Number of epochs in stochastic", "= [] nodes = list(graph.nodes()) degree_percentiles: Optional[np.ndarray] = None if", "graph is unweighted, the weight of each edge will default", "np.percentile( [degree for _, degree in graph.degree()], [x for x", "= [ graph[node][nbr].get(\"weight\", weight_default) for nbr in sorted(graph.neighbors(node)) ] norm_const", "random walks be dominated by low degree nodes. If the", "walk = [start_node] # Percentiles will be provided if we", "\" f\"{str(len(graph.edges()))} edges\" ) start = time.time() logging.info(f\"Starting at time", "under the MIT License. import logging import math import time", "random_seed: Optional[int], ): \"\"\" Learn embeddings by optimizing the skip-gram", "State for reproducible results. 
Default is None and will produce", "inout_hyperparameter : float Inout hyperparameter random_state : np.random.RandomState Random State", "total_nodes = len(graph.nodes()) bucket = 0 current_node = 0 quotient", "smaller breadth of paths # (due to their being less", "number of walks as a high degree node (which it", "if we are using the 'interpolate_walk_lengths_by_node_degree' feature. # the intent", "of 1. If it is in the top 10 percentile,", "for edges\") self.alias_nodes = alias_nodes self.alias_edges = alias_edges return def", "\"This function does not work on multigraphs - because there", "degree_percentiles: Optional[np.ndarray] = None if interpolate_walk_lengths_by_node_degree: degree_percentiles = np.percentile( [degree", "fully deterministically-reproducible run, you must also limit to a single", "multi-edges (i.e. aggregate weights or take last edge weight). If", "dynamic walk length that corresponds to each nodes degree. If", "Alias Method used in this functionality can be found at", ") norm_const = sum(unnormalized_probs) normalized_probs = [float(u_prob) / norm_const for", "the low degree nodes have the # same number of", "= node2vec_graph.label_map_to_string return ( np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]), labels,", "a # maximum walk length, and interpolate the inner 70%", "prob if alias[i] < 1.0: smaller.append(i) else: larger.append(i) while len(smaller)", "Random State for reproducible results. Default is None and will", "found at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ---------- .. [1] <NAME> and <NAME>", "do a linear interpolation to decide the length of the", "in range(num_walks): logging.info( \"Walk iteration: \" + str(walk_iteration + 1)", "List[float], random_state: np.random.RandomState ): \"\"\" Draw sample from a non-uniform", "import remap_node_ids def node2vec_embed( graph: Union[nx.Graph, nx.DiGraph], num_walks: int =", "workers, iterations, interpolate_walk_lengths_by_node_degree, ) random_state = np.random.RandomState(seed=random_seed) node2vec_graph = _Node2VecGraph(", "import math import time from typing import Any, List, Optional,", "current_neighbors = sorted(graph.neighbors(current)) if len(current_neighbors) > 0: if len(walk) ==", "float = 1.0, inout_hyperparameter: float = 1.0, dimensions: int =", "window_size : int Maximum distance between the current and predicted", "a node2vec embedding from a given graph. Will follow the", "many worker threads to train the model. Default is 8.", "__init__( self, graph: nx.Graph, return_hyperparameter: float, inout_hyperparameter: float, random_state: Optional[np.random.RandomState]", "): \"\"\" Learn embeddings by optimizing the skip-gram objective using", "properly handles the multi-edges (i.e. aggregate weights or take last", "will bias your resulting Word2Vec embedding if degree_percentiles is not", "ValueError(f\"{name} must be > 0\") def _assert_is_nonnegative_float(name: str, value: float):", ") logging.info( f\"Starting preprocessing of transition probabilities on graph with", "int, random_seed: Optional[int], ): \"\"\" Learn embeddings by optimizing the", "in the top 10 percentile, use ``walk_length``. 
If it is", "a high degree node (which it will if this setting", "probabilities on graph with {str(len(graph.nodes()))} nodes and \" f\"{str(len(graph.edges()))} edges\"", "number_of_outcomes * prob if alias[i] < 1.0: smaller.append(i) else: larger.append(i)", "num_walks: int = 10, walk_length: int = 80, return_hyperparameter: float", "user properly handles the multi-edges (i.e. aggregate weights or take", "used for reproducible results. Default is None and will produce", "{str(start - end)}\" ) labels = node2vec_graph.original_graph.nodes() remapped_labels = node2vec_graph.label_map_to_string", ": bool Use a dynamic walk length that corresponds to", "have the # same number of walks as the high", "decide the length of the walk. \"\"\" new_walk_length = None", "self, graph: nx.Graph, return_hyperparameter: float, inout_hyperparameter: float, random_state: Optional[np.random.RandomState] =", "def node2vec_walk( self, walk_length: int, start_node: Any, degree_percentiles: Optional[np.ndarray], ):", "for skip-gram; otherwise CBOW workers=workers, iter=iterations, seed=random_seed, ) return model", "walk length of 1 if i == 0 and degree", "is_directed: for edge in graph.edges(): current_edge += 1 if current_edge", "at time {str(time.time())}\") model = _learn_embeddings( walks, dimensions, window_size, workers,", "1) / q ) norm_const = sum(unnormalized_probs) normalized_probs = [float(u_prob)", "Learning for Networks.\" Knowledge Discovery and Data Mining, 2016. \"\"\"", "20 percentile, default to a walk length of 1. If", "0 quotient = int(total_nodes / 10) logging.info( f\"Beginning preprocessing of", "nx.DiGraph], num_walks: int = 10, walk_length: int = 80, return_hyperparameter:", "is None and will produce random results \"\"\" def __init__(", "dimensions: int, window_size: int, workers: int, iterations: int, random_seed: Optional[int],", "nx.DiGraph], num_walks: int, walk_length: int, return_hyperparameter: float, inout_hyperparameter: float, dimensions:", "be in. if degree <= percentile: new_walk_length = max_walk_length *", "if new_walk_length < 1: new_walk_length = 1 return math.floor(new_walk_length) def", "logging.info( f\"Completed. Ending time is {str(end)} Elapsed time is {str(start", "sorted(graph.neighbors(current)) if len(current_neighbors) > 0: if len(walk) == 1: walk.append(", "range(num_walks): logging.info( \"Walk iteration: \" + str(walk_iteration + 1) +", "iterations: int, interpolate_walk_lengths_by_node_degree: bool, ): if not isinstance(graph, nx.Graph): raise", "int, iterations: int, random_seed: Optional[int], ): \"\"\" Learn embeddings by", "edge weight). If the graph is unweighted, the weight of", "int, workers: int, iterations: int, interpolate_walk_lengths_by_node_degree: bool, ): if not", "= sorted(graph.neighbors(current)) if len(current_neighbors) > 0: if len(walk) == 1:", "interpolate_walk_lengths_by_node_degree: bool = True, random_seed: Optional[int] = None, ) ->", "for a fully deterministically-reproducible run, you must also limit to", "== 1: walk.append( current_neighbors[ _alias_draw( alias_nodes[current][0], alias_nodes[current][1], self.random_state, ) ]", "the environment variable ``PYTHONHASHSEED`` must be set to control hash", "run, you must also limit to a single worker thread", "output. Note that for a fully deterministically-reproducible run, you must", "and will produce random results \"\"\" def __init__( self, graph:", "embeddings by optimizing the skip-gram objective using SGD. 
\"\"\" from", "is less than the first element of the percentiles list,", "None and will produce a random output. Note that for", "interpolate_walk_lengths_by_node_degree: bool = False, ): \"\"\" Repeatedly simulate random walks", "the skip-gram objective using SGD. \"\"\" from gensim.models import Word2Vec", "In addition the environment variable ``PYTHONHASHSEED`` must be set to", "1 logging.info(f\"Completed {current_node} / {total_nodes} vertices\") unnormalized_probs = [ graph[node][nbr].get(\"weight\",", "_Node2VecGraph: \"\"\" Temporary inner state object for constructing the random", "= 0 current_node = 0 quotient = int(total_nodes / 10)", "smaller.pop() large = larger.pop() sampled_probabilities[small] = large alias[large] = alias[large]", "in unnormalized_probs ] alias_nodes[node] = _alias_setup(normalized_probs) logging.info( f\"Completed preprocessing of", "a matrix, with each row index corresponding to the embedding", "a walk length of 1. If it is in the", "the inner 70% linearly from min to max. # This", "https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ---------- .. [1] <NAME> and <NAME> \"node2vec: Scalable", "x in range(20, 90, 10)] ) for walk_iteration in range(num_walks):", "not work on multigraphs - because there are two reasonable", "= node2vec_graph._simulate_walks( num_walks, walk_length, interpolate_walk_lengths_by_node_degree ) logging.info(f\"Learning embeddings at time", ") random_state = np.random.RandomState(seed=random_seed) node2vec_graph = _Node2VecGraph( graph, return_hyperparameter, inout_hyperparameter,", "a smaller breadth of random walks when compared to the", "bucket += 1 logging.info(f\"Completed {current_node} / {total_nodes} vertices\") unnormalized_probs =", "walk_length, return_hyperparameter, inout_hyperparameter, dimensions, window_size, workers, iterations, interpolate_walk_lengths_by_node_degree, ) random_state", "provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature. # the", "\"\"\" Repeatedly simulate random walks from each node. \"\"\" graph", "length of 0 is invalid but can happen depending on", "for Networks.\" Knowledge Discovery and Data Mining, 2016. \"\"\" _preconditions(", "correlated. Notes ----- The original reference implementation of node2vec comes", "0 current_node = 0 quotient = int(total_nodes / 10) logging.info(", "dominated by low degree nodes. If the low degree nodes", "self.p q = self.q unnormalized_probs = [] for destination_neighbor in", "of 1 if i == 0 and degree < percentile:", "the bottom 20 percentile, default to a walk length of", "0 quotient = int(total_edges / 10) logging.info( f\"Beginning preprocessing of", "greater than the last element of the list, default it", "Networks.\" Knowledge Discovery and Data Mining, 2016. \"\"\" _preconditions( graph,", "scheduling. In addition the environment variable ``PYTHONHASHSEED`` must be set", "int Number of walks per source. Default is 10. 
walk_length:", "[] larger = [] for i, prob in enumerate(probabilities): alias[i]", ") def _assert_is_positive_int(name: str, value: int): if not isinstance(value, int):", "int, walk_length: int, return_hyperparameter: float, inout_hyperparameter: float, dimensions: int, window_size:", "{current_edge} / {total_edges} edges\") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) alias_edges[(edge[1], edge[0])]", "0: if len(walk) == 1: walk.append( current_neighbors[ _alias_draw( alias_nodes[current][0], alias_nodes[current][1],", "non-uniform discrete distribution using alias sampling. \"\"\" number_of_outcomes = len(probabilities)", "new_walk_length = 1 return math.floor(new_walk_length) def _simulate_walks( self, num_walks: int,", "feature. # the intent of the code is to default", "alias_nodes[node] = _alias_setup(normalized_probs) logging.info( f\"Completed preprocessing of transition probabilities for", "bool = False, ): \"\"\" Repeatedly simulate random walks from", "we are using the 'interpolate_walk_lengths_by_node_degree' feature. # the intent of", "Optional[int] = None, ) -> Tuple[np.array, List[Any]]: \"\"\" Generates a", "stochastic gradient descent (SGD) interpolate_walk_lengths_by_node_degree : bool Use a dynamic", "probabilities for {total_edges} edges\" ) if is_directed: for edge in", "node2vec_graph.original_graph.nodes() remapped_labels = node2vec_graph.label_map_to_string return ( np.array([model.wv.get_vector(remapped_labels[node]) for node in", "= alias_nodes self.alias_edges = alias_edges return def _alias_setup(probabilities: List[float]): \"\"\"", "random_state def node2vec_walk( self, walk_length: int, start_node: Any, degree_percentiles: Optional[np.ndarray],", "to default the bottom 20% of to a minimal walk", "norm_const = sum(unnormalized_probs) normalized_probs = [float(u_prob) / norm_const for u_prob", "= self.graph alias_nodes = self.alias_nodes alias_edges = self.alias_edges walk =", "be provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature. #", "Union[nx.Graph, nx.DiGraph] A networkx graph or digraph. A multigraph should", "for destination_neighbor in sorted(graph.neighbors(destination)): if destination_neighbor == source: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\",", "vertices\") unnormalized_probs = [ graph[node][nbr].get(\"weight\", weight_default) for nbr in sorted(graph.neighbors(node))", "graph[destination][destination_neighbor].get(\"weight\", 1) / p ) elif graph.has_edge(destination_neighbor, source): unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\",", "be dominated by low degree nodes. If the low degree", "node2vec_graph._preprocess_transition_probabilities() logging.info(f\"Simulating walks on graph at time {str(time.time())}\") walks =", ">= 0.0\") def _preconditions( graph: Union[nx.Graph, nx.DiGraph], num_walks: int, walk_length:", "time {str(time.time())}\") walks = node2vec_graph._simulate_walks( num_walks, walk_length, interpolate_walk_lengths_by_node_degree ) logging.info(f\"Learning", "random_state ) logging.info( f\"Starting preprocessing of transition probabilities on graph", "1.0: smaller.append(large) else: larger.append(large) return sampled_probabilities, alias def _alias_draw( probabilities:", "{str(len(graph.nodes()))} nodes and \" f\"{str(len(graph.edges()))} edges\" ) start = time.time()", "treat a \" \"multigraph with different behaviors, we insist that", "by optimizing the skip-gram objective using SGD. 
\"\"\" from gensim.models", "and Data Mining, 2016. \"\"\" _preconditions( graph, num_walks, walk_length, return_hyperparameter,", "isinstance(value, int): raise TypeError(f\"{name} must be an int\") if value", "happen depending on the percentiles used if new_walk_length < 1:", "Optional[np.ndarray] = None if interpolate_walk_lengths_by_node_degree: degree_percentiles = np.percentile( [degree for", "1, interpolate_walk_lengths_by_node_degree: bool = True, random_seed: Optional[int] = None, )", "the percentiles list, default the walk length to 1. Otherwise,", ") ) return walks def _get_alias_edge(self, source: Any, destination: Any):", "thread (`workers=1`), to eliminate ordering jitter from OS thread scheduling.", "at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ---------- .. [1] <NAME> and <NAME> \"node2vec:", "np.zeros(number_of_outcomes) sampled_probabilities = np.zeros(number_of_outcomes, dtype=int) smaller = [] larger =", "True, random_seed: Optional[int] = None, ) -> Tuple[np.array, List[Any]]: \"\"\"", "otherwise, find which bucket we are going to be in.", ") alias_edges = {} total_edges = len(graph.edges()) bucket = 0", "= max_walk_length * ((i * 0.1) + 0.2) break #", "optimizing the skip-gram objective using SGD. \"\"\" from gensim.models import", "to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details \"\"\" number_of_outcomes = len(probabilities) alias =", "minimal walk length, default the top 10% to a #", "2016. \"\"\" _preconditions( graph, num_walks, walk_length, return_hyperparameter, inout_hyperparameter, dimensions, window_size,", "Dimensionality of the word vectors. Default is 128. window_size :", "< 0.0: raise ValueError(f\"{name} must be >= 0.0\") def _preconditions(", "each row index corresponding to the embedding for each node.", "reasonable ways to treat a \" \"multigraph with different behaviors,", "percentiles: list, max_walk_length: int ): \"\"\" Given a node's degree,", "transition probabilities on graph with {str(len(graph.nodes()))} nodes and \" f\"{str(len(graph.edges()))}", "we are going to be in. if degree <= percentile:", "1 logging.info(f\"Completed {current_edge} / {total_edges} edges\") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])", "num_walks : int Number of walks per source. Default is", "self.alias_nodes = alias_nodes self.alias_edges = alias_edges return def _alias_setup(probabilities: List[float]):", "else: prev = walk[-2] next = current_neighbors[ _alias_draw( alias_edges[(prev, current)][0],", ") -> Tuple[np.array, List[Any]]: \"\"\" Generates a node2vec embedding from", "max_walk_length. If it falls in the middle, do a linear", "skip-gram objective using SGD. \"\"\" from gensim.models import Word2Vec walks", "\"Walk iteration: \" + str(walk_iteration + 1) + \"/\" +", "node. \"\"\" graph = self.graph alias_nodes = self.alias_nodes alias_edges =", "> 0\") def _assert_is_nonnegative_float(name: str, value: float): if not isinstance(value,", "# the degree is above the last percentile if not", "[degree for _, degree in graph.degree()], [x for x in", "If it is in the 20-80 percentiles, linearly interpolate between", "(due to their being less nodes to choose from) and", "---------- graph: Union[nx.Graph, nx.DiGraph] A networkx graph or digraph. A", "graph: Union[nx.Graph, nx.DiGraph], num_walks: int = 10, walk_length: int =", "= None for i, percentile in enumerate(percentiles): # if we", "row in the matrix. 
The matrix and vector are positionally", "unnormalized_probs = [ graph[node][nbr].get(\"weight\", weight_default) for nbr in sorted(graph.neighbors(node)) ]", "current_edge = 0 quotient = int(total_edges / 10) logging.info( f\"Beginning", "walk_length: int, interpolate_walk_lengths_by_node_degree: bool = False, ): \"\"\" Repeatedly simulate", "be an int\") if value <= 0: raise ValueError(f\"{name} must", "str(walk_iteration + 1) + \"/\" + str(num_walks) ) self.random_state.shuffle(nodes) for", "if not isinstance(interpolate_walk_lengths_by_node_degree, bool): raise TypeError(\"interpolate_walk_lengths_by_node_degree must be a bool\")", "[ float(u_prob) / norm_const for u_prob in unnormalized_probs ] alias_nodes[node]", "1. num_walks : int Number of walks per source. Default", "the node is in the bottom 20 percentile, default to", "state object for constructing the random walks Parameters ---------- graph:", "for vertices\" ) alias_edges = {} total_edges = len(graph.edges()) bucket", "the # same number of walks as the high degree", "graph with {str(len(graph.nodes()))} nodes and \" f\"{str(len(graph.edges()))} edges\" ) start", "given graph. Will follow the word2vec algorithm to create the", "90, 10)] ) for walk_iteration in range(num_walks): logging.info( \"Walk iteration:", "graph_with_new_ids self.label_map_to_string = new_id_map self.is_directed = self.graph.is_directed() self.p = return_hyperparameter", "# Documentation - https://radimrehurek.com/gensim/models/word2vec.html model = Word2Vec( walks, size=dimensions, window=window_size,", "1.0): \"\"\" Preprocessing of transition probabilities for guiding the random", "a walk length of 1 if i == 0 and", "of epochs in stochastic gradient descent (SGD) interpolate_walk_lengths_by_node_degree : bool", "it is in the top 10 percentile, use ``walk_length``. If", "self.alias_nodes alias_edges = self.alias_edges walk = [start_node] # Percentiles will", "number_of_outcomes = len(probabilities) alias = np.zeros(number_of_outcomes) sampled_probabilities = np.zeros(number_of_outcomes, dtype=int)", "* quotient: bucket += 1 logging.info(f\"Completed {current_edge} / {total_edges} edges\")", "1.0 dimensions : int Dimensionality of the word vectors. Default", "the degree is less than the first element of the", "alias[small] - 1.0 if alias[large] < 1.0: smaller.append(large) else: larger.append(large)", "): self.original_graph: nx.Graph = graph graph_with_new_ids, new_id_map = remap_node_ids(graph=graph) self.graph", "will if this setting is not on), then the lower", "labels = node2vec_graph.original_graph.nodes() remapped_labels = node2vec_graph.label_map_to_string return ( np.array([model.wv.get_vector(remapped_labels[node]) for", "probabilities for guiding the random walks. \"\"\" graph = self.graph", "setting is not on), then the lower degree nodes will", "same number of walks as the high degree nodes, the", "graph.has_edge(destination_neighbor, source): unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) ) else: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1)", "to be in. if degree <= percentile: new_walk_length = max_walk_length", "interpolate_walk_lengths_by_node_degree : bool Use a dynamic walk length that corresponds", "> bucket * quotient: bucket += 1 logging.info(f\"Completed {current_edge} /", "= np.percentile( [degree for _, degree in graph.degree()], [x for", "a float\") if value < 0.0: raise ValueError(f\"{name} must be", "the multi-edges (i.e. 
aggregate weights or take last edge weight).", "on graph with {str(len(graph.nodes()))} nodes and \" f\"{str(len(graph.edges()))} edges\" )", "create the embedding. Parameters ---------- graph: Union[nx.Graph, nx.DiGraph] A networkx", "self.is_directed = self.graph.is_directed() self.p = return_hyperparameter self.q = inout_hyperparameter self.random_state", "{current_edge} / {total_edges} edges\") alias_edges[edge] = self._get_alias_edge(edge[0], edge[1]) else: for", "it is in the 20-80 percentiles, linearly interpolate between 1", "Seed to be used for reproducible results. Default is None", "iterations : int Number of epochs in stochastic gradient descent", "self.node2vec_walk( walk_length=walk_length, start_node=node, degree_percentiles=degree_percentiles, ) ) return walks def _get_alias_edge(self,", "self.graph = graph_with_new_ids self.label_map_to_string = new_id_map self.is_directed = self.graph.is_directed() self.p", "if not isinstance(value, int): raise TypeError(f\"{name} must be an int\")", "int(total_nodes / 10) logging.info( f\"Beginning preprocessing of transition probabilities for", "at time {str(start)}\") node2vec_graph._preprocess_transition_probabilities() logging.info(f\"Simulating walks on graph at time", "- 1.0 if alias[large] < 1.0: smaller.append(large) else: larger.append(large) return", "/ norm_const for u_prob in unnormalized_probs] return _alias_setup(normalized_probs) def _preprocess_transition_probabilities(self,", "that corresponds to each nodes degree. If the node is", "walks] # Documentation - https://radimrehurek.com/gensim/models/word2vec.html model = Word2Vec( walks, size=dimensions,", "Inout hyperparameter (q). Default is 1.0 dimensions : int Dimensionality", "transition probabilities for {total_nodes} vertices\" ) for node in graph.nodes():", "128, window_size: int = 10, workers: int = 8, iterations:", "alias_nodes[current][1], self.random_state, ) ] ) else: prev = walk[-2] next", "walks as a high degree node (which it will if", "for non-uniform sampling from discrete distributions. Refer to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for", "\"purposes of this embedding\" ) _assert_is_positive_int(\"num_walks\", num_walks) _assert_is_positive_int(\"walk_length\", walk_length) _assert_is_nonnegative_float(\"return_hyperparameter\",", "0\") def _assert_is_nonnegative_float(name: str, value: float): if not isinstance(value, float):", "each row in the matrix. The matrix and vector are", "that the calling user properly handles the multi-edges (i.e. aggregate", "inout_hyperparameter: float, random_state: Optional[np.random.RandomState] = None, ): self.original_graph: nx.Graph =", "self.q = inout_hyperparameter self.random_state = random_state def node2vec_walk( self, walk_length:", "Graph or DiGraph\") if graph.is_multigraph(): raise ValueError( \"This function does", "return_hyperparameter: float = 1.0, inout_hyperparameter: float = 1.0, dimensions: int", "destination_neighbor == source: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) / p ) elif", "larger.pop() sampled_probabilities[small] = large alias[large] = alias[large] + alias[small] -", "you must also limit to a single worker thread (`workers=1`),", "set to control hash randomization. 
Returns ------- Tuple[np.array, List[Any]] A", "Union[nx.Graph, nx.DiGraph], num_walks: int, walk_length: int, return_hyperparameter: float, inout_hyperparameter: float,", "current_edge > bucket * quotient: bucket += 1 logging.info(f\"Completed {current_edge}", "on multigraphs - because there are two reasonable ways to", "hash randomization. Returns ------- Tuple[np.array, List[Any]] A tuple containing a", "int = 1, interpolate_walk_lengths_by_node_degree: bool = True, random_seed: Optional[int] =", "control hash randomization. Returns ------- Tuple[np.array, List[Any]] A tuple containing", "walk_length, interpolate_walk_lengths_by_node_degree ) logging.info(f\"Learning embeddings at time {str(time.time())}\") model =", "non-uniform sampling from discrete distributions. Refer to https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ for details", "walk length, default the top 10% to a # maximum", "turned into a non-multigraph so that the calling user properly", "str, value: int): if not isinstance(value, int): raise TypeError(f\"{name} must", "vectors. Default is 128. window_size : int Maximum distance between", "new_walk_length < 1: new_walk_length = 1 return math.floor(new_walk_length) def _simulate_walks(", "graph at time {str(time.time())}\") walks = node2vec_graph._simulate_walks( num_walks, walk_length, interpolate_walk_lengths_by_node_degree", "graph_with_new_ids, new_id_map = remap_node_ids(graph=graph) self.graph = graph_with_new_ids self.label_map_to_string = new_id_map", "for walk_iteration in range(num_walks): logging.info( \"Walk iteration: \" + str(walk_iteration", "not isinstance(value, int): raise TypeError(f\"{name} must be an int\") if", "behaviors, we insist that the caller create an appropriate Graph", "node has the same number of walks as a high", "Microsoft Corporation and contributors. # Licensed under the MIT License.", "len(walk) == 1: walk.append( current_neighbors[ _alias_draw( alias_nodes[current][0], alias_nodes[current][1], self.random_state, )", "int Number of epochs in stochastic gradient descent (SGD) interpolate_walk_lengths_by_node_degree", "an int\") if value <= 0: raise ValueError(f\"{name} must be", "in graph.degree()], [x for x in range(20, 90, 10)] )", "np from ..utils import remap_node_ids def node2vec_embed( graph: Union[nx.Graph, nx.DiGraph],", "is 80. return_hyperparameter : float Return hyperparameter (p). Default is", "Note that for a fully deterministically-reproducible run, you must also", "# Training algorithm: 1 for skip-gram; otherwise CBOW workers=workers, iter=iterations,", "walk_length: int, start_node: Any, degree_percentiles: Optional[np.ndarray], ): \"\"\" Simulate a", "= len(graph.edges()) bucket = 0 current_edge = 0 quotient =", "distance between the current and predicted word within a sentence.", "== source: unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) / p ) elif graph.has_edge(destination_neighbor,", "results. Default is None and will produce a random output.", "------- Tuple[np.array, List[Any]] A tuple containing a matrix, with each", "1) + \"/\" + str(num_walks) ) self.random_state.shuffle(nodes) for node in", "must be a bool\") def _learn_embeddings( walks: List[Any], dimensions: int,", "not isinstance(value, float): raise TypeError(f\"{name} must be a float\") if", "of each edge will default to 1. num_walks : int", "bucket we are going to be in. if degree <=", "predicted word within a sentence. Default is 10. 
workers :", "walk length that corresponds to each nodes degree. If the", "# same number of walks as the high degree nodes,", "Temporary inner state object for constructing the random walks Parameters", "a vector containing the corresponding vertex labels for each row", "probabilities for edges\") self.alias_nodes = alias_nodes self.alias_edges = alias_edges return", "of transition probabilities for {total_nodes} vertices\" ) for node in", "this functionality can be found at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ---------- ..", "gensim.models import Word2Vec walks = [list(map(str, walk)) for walk in", "alias[i] = number_of_outcomes * prob if alias[i] < 1.0: smaller.append(i)", "work on multigraphs - because there are two reasonable ways", "inout_hyperparameter, random_state ) logging.info( f\"Starting preprocessing of transition probabilities on", "walk. \"\"\" new_walk_length = None for i, percentile in enumerate(percentiles):", "logging.info(f\"Completed {current_node} / {total_nodes} vertices\") unnormalized_probs = [ graph[node][nbr].get(\"weight\", weight_default)", "return walks def _get_alias_edge(self, source: Any, destination: Any): \"\"\" Get", "= [] for destination_neighbor in sorted(graph.neighbors(destination)): if destination_neighbor == source:", "sorted(graph.neighbors(node)) ] norm_const = sum(unnormalized_probs) normalized_probs = [ float(u_prob) /", "np.zeros(number_of_outcomes, dtype=int) smaller = [] larger = [] for i,", "quotient = int(total_nodes / 10) logging.info( f\"Beginning preprocessing of transition", "of a walk that should be used. If the degree", "using the 'interpolate_walk_lengths_by_node_degree' feature. # the intent of the code", "weights or take last edge weight). If the graph is", "non-multigraph so that the calling user properly handles the multi-edges", "degree <= percentile: new_walk_length = max_walk_length * ((i * 0.1)", "len(probabilities) alias = np.zeros(number_of_outcomes) sampled_probabilities = np.zeros(number_of_outcomes, dtype=int) smaller =", "Any, List, Optional, Tuple, Union import networkx as nx import", "u_prob in unnormalized_probs] return _alias_setup(normalized_probs) def _preprocess_transition_probabilities(self, weight_default: float =", "If the degree is less than the first element of", "Will follow the word2vec algorithm to create the embedding. Parameters", "Return hyperparameter inout_hyperparameter : float Inout hyperparameter random_state : np.random.RandomState", "time is {str(end)} Elapsed time is {str(start - end)}\" )", "f\"Beginning preprocessing of transition probabilities for {total_edges} edges\" ) if", "walks per source. Default is 10. walk_length: int Length of", "to choose from) and will bias your resulting Word2Vec embedding", "last element of the list, default it to the max_walk_length.", "1 if i == 0 and degree < percentile: return", "int Dimensionality of the word vectors. Default is 128. window_size", "float = 1.0, dimensions: int = 128, window_size: int =", "10. workers : int Use these many worker threads to", "the model. Default is 8. iterations : int Number of", "alias_edges[(prev, current)][1], self.random_state, ) ] walk.append(next) else: break return walk", "0 current_edge = 0 quotient = int(total_edges / 10) logging.info(", "else: larger.append(i) while len(smaller) > 0 and len(larger) > 0:", "of the word vectors. Default is 128. 
window_size : int", "# This is to avoid having your random walks be", "/ p ) elif graph.has_edge(destination_neighbor, source): unnormalized_probs.append( graph[destination][destination_neighbor].get(\"weight\", 1) )", "that should be used. If the degree is less than", "element of the list, default it to the max_walk_length. If", "the intent of the code is to default the bottom", "# if we are below the first percentile in the", "and interpolate the inner 70% linearly from min to max.", "def _assert_is_nonnegative_float(name: str, value: float): if not isinstance(value, float): raise", "import Word2Vec walks = [list(map(str, walk)) for walk in walks]", "caller create an appropriate Graph or \" \"DiGraph that represents", ") if is_directed: for edge in graph.edges(): current_edge += 1", "percentile: new_walk_length = max_walk_length * ((i * 0.1) + 0.2)", "of walks as the high degree nodes, the low degree", "int\") if value <= 0: raise ValueError(f\"{name} must be >", "for the \" \"purposes of this embedding\" ) _assert_is_positive_int(\"num_walks\", num_walks)", "walks dominating your higher degree nodes. random_seed : int Seed", "bool = True, random_seed: Optional[int] = None, ) -> Tuple[np.array,", "nodes have the # same number of walks as the", "\"\"\" def __init__( self, graph: nx.Graph, return_hyperparameter: float, inout_hyperparameter: float,", "TypeError(\"graph must be a networkx Graph or DiGraph\") if graph.is_multigraph():", "from min to max. # This is to avoid having", "i == 0 and degree < percentile: return 1 #", "for x in range(20, 90, 10)] ) for walk_iteration in", "than the last element of the list, default it to", "self._get_alias_edge(edge[1], edge[0]) logging.info(f\"Completed preprocessing of transition probabilities for edges\") self.alias_nodes", "is 10. walk_length: int Length of walk per source. Default", "0: raise ValueError(f\"{name} must be > 0\") def _assert_is_nonnegative_float(name: str,", "len(graph.nodes()) bucket = 0 current_node = 0 quotient = int(total_nodes", "Method used in this functionality can be found at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/", "Simulate a random walk starting from start node. \"\"\" graph", "contributors. # Licensed under the MIT License. import logging import", "default to 1. num_walks : int Number of walks per", "the percentiles used if new_walk_length < 1: new_walk_length = 1", "handles the multi-edges (i.e. aggregate weights or take last edge", "random output. Note that for a fully deterministically-reproducible run, you", "gradient descent (SGD) interpolate_walk_lengths_by_node_degree : bool Use a dynamic walk", "self._get_alias_edge(edge[0], edge[1]) else: for edge in graph.edges(): current_edge += 1", "create an appropriate Graph or \" \"DiGraph that represents the", "node is in the bottom 20 percentile, default to a", "\"\"\" graph = self.graph walks = [] nodes = list(graph.nodes())", "per source. Default is 80. return_hyperparameter : float Return hyperparameter", "current_edge += 1 if current_edge > bucket * quotient: bucket", "): \"\"\" Simulate a random walk starting from start node.", "/ q ) norm_const = sum(unnormalized_probs) normalized_probs = [float(u_prob) /", "graph. Will follow the word2vec algorithm to create the embedding.", "= [start_node] # Percentiles will be provided if we are", "is to default the bottom 20% of to a minimal", "walk starting from start node. \"\"\" graph = self.graph alias_nodes", "the random walks. 
\"\"\" graph = self.graph is_directed = self.is_directed", "int = 10, workers: int = 8, iterations: int =", "nx.Graph A networkx graph return_hyperparameter : float Return hyperparameter inout_hyperparameter", "int = 10, walk_length: int = 80, return_hyperparameter: float =", "int = 8, iterations: int = 1, interpolate_walk_lengths_by_node_degree: bool =", "time {str(start)}\") node2vec_graph._preprocess_transition_probabilities() logging.info(f\"Simulating walks on graph at time {str(time.time())}\")", "1: new_walk_length = 1 return math.floor(new_walk_length) def _simulate_walks( self, num_walks:", "the embedding for each node. The tuple also contains a", "will produce random results \"\"\" def __init__( self, graph: nx.Graph,", "a minimal walk length, default the top 10% to a", "References ---------- .. [1] <NAME> and <NAME> \"node2vec: Scalable Feature", "\"\"\" _preconditions( graph, num_walks, walk_length, return_hyperparameter, inout_hyperparameter, dimensions, window_size, workers,", "return sampled_probabilities, alias def _alias_draw( probabilities: List[float], alias: List[float], random_state:", "can be found at https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/ References ---------- .. [1] <NAME>", "above the last percentile if not new_walk_length: new_walk_length = max_walk_length", "compared to the high degree nodes. This will result in", "int, window_size: int, workers: int, iterations: int, interpolate_walk_lengths_by_node_degree: bool, ):", "alias[large] < 1.0: smaller.append(large) else: larger.append(large) return sampled_probabilities, alias def", "= self._get_walk_length_interpolated( degree, degree_percentiles, walk_length ) while len(walk) < walk_length:", "): \"\"\" Repeatedly simulate random walks from each node. \"\"\"", "f\"Beginning preprocessing of transition probabilities for {total_nodes} vertices\" ) for", "list, default to a walk length of 1 if i", "weight). If the graph is unweighted, the weight of each", "graph = self.graph walks = [] nodes = list(graph.nodes()) degree_percentiles:", "avoid having your random walks be dominated by low degree", "break # the degree is above the last percentile if", "= sum(unnormalized_probs) normalized_probs = [ float(u_prob) / norm_const for u_prob", "source. Default is 10. walk_length: int Length of walk per", "window=window_size, min_count=0, sg=1, # Training algorithm: 1 for skip-gram; otherwise" ]
[ "current_battle(self): return [i for i in self.battles if i.room ==", "re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line) > 0 and self.room in", "elif message[1] == \"c\": self.battle_message(message) elif message[1] == \"challstr\": self.login(message)", "if message[1] == \"turn\" or message[1] == \"start\": getattr(self.current_battle()[self.room], \"decide\")()", "\"name\": self.username, \"pass\": self.password, \"challengekeyid\": key, \"challenge\": challenge } data", "self.symbol: messages = [\"\"] + messages # now the list", "self.raw(message) elif message[1] == \"c\": self.battle_message(message) elif message[1] == \"challstr\":", "the list has five elements. self.commands.append(Thread(target=self.command, args=(messages, self.room, \" \"", "message[2].lower()) self.rooms_joined.append(room) elif message[1] == \"raw\": self.raw(message) elif message[1] ==", "\"ou\", \"ubers\", \"uu\", \"ru\", \"nu\", \"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"] def", "self.battles = [] self.plugins = plugins self.rooms_joined = [] self.log", "if i.room == self.room][0] def battle(self, message): message[1] = re.sub(r'[^A-z0-9]',", "self.rooms_joined.append(room) elif message[1] == \"raw\": self.raw(message) elif message[1] == \"c\":", "'', message[2].lower()) self.rooms_joined.append(room) elif message[1] == \"raw\": self.raw(message) elif message[1]", "Thread from battle import Battle import commands import traceback import", "== self.symbol: messages = [\"\"] + messages # now the", "self.connect() except SystemExit: return sys.exit() def message(self, messages): timestamp =", "wrong.\".format(room)) def login(self, message): key = message[2] challenge = message[3]", "self.battle_message(message) elif message[1] == \"challstr\": self.login(message) elif message[1] == \"updateuser\":", "\"getassertion\", \"userid\": self.username, \"challengekeyid\": key, \"challenge\": challenge } data =", "== \"lobby\": self.rooms[self.i] = \"\" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i", "message[4]: args = message[4].split(\"{} \".format(cmd))[1] else: args = [] command", "\"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"] def __str__(self): return \"<Bot:{}>\".format(self.username) def join(self,", "\"act\": \"getassertion\", \"userid\": self.username, \"challengekeyid\": key, \"challenge\": challenge } data", "self.room, user)).start()) def battle_message(self, messages): user = re.sub(r'[^A-z0-9]', '', messages[2])", "'', messages[2]) if messages[3][0] == self.symbol: messages = [\"\"] +", "def __init__(self, username, password, server, admins, rooms, symbol, avatar, plugins,", "\"lc\", \"anythinggoes\", \"battlespotsingles\"] def __str__(self): return \"<Bot:{}>\".format(self.username) def join(self, room):", "five elements. 
self.commands.append(Thread(target=self.command, args=(messages, self.room, \" \" + user)).start()) def", "print(\"NEW BATTLE\") self.battles[-1].run(messages) else: pass def update_battle(self, messages): data =", "user): cmd = message[4].split(self.symbol)[1].split(\" \")[0] try: if \" \" in", "data[\"assertion\"])) def disconnect(self): self.ws = None sys.exit() def start(self): try:", "i in self.battles if i.room == self.room][0] def battle(self, message):", "def __str__(self): return \"<Bot:{}>\".format(self.username) def join(self, room): self.ws.send(\"|/join {}\".format(room)) def", "update_battle(self, messages): data = json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) > 0: who", "data = requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else: data =", "is \") print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\", \"ignore\")) try: if \">\"", "\"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room, plugin, message): self.ws.send(\"{}|{}\".format(room,", "server self.admins = admins self.rooms = rooms self.symbol = symbol", "pass def update_battle(self, messages): data = json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) >", "'', message[1]) if message[1] == \"turn\" or message[1] == \"start\":", "in str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data) == 0:", "args = message[4].split(\"{} \".format(cmd))[1] else: args = [] command =", "data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else: data = { \"act\": \"login\",", "\"challstr\": self.login(message) elif message[1] == \"updateuser\": if not self.joined_all_rooms: for", "messages[3][0] == self.symbol: messages = [\"\"] + messages # now", "= json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def", "recognized)\".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room)) def login(self, message):", "len(data) == 0: # new battle self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW", "= [\"\"] + messages # now the list has five", "class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__),", "match_line = [x for x in self.plugins if re.match(x.match_line, messages[4],", "= username self.password = password self.joined_all_rooms = False self.avatar =", "else: args = [] command = getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(),", "self.avatar = avatar self.server = server self.admins = admins self.rooms", "self.last_message = {} self.i = 0 self.url = \"http://play.pokemonshowdown.com/action.php\" self.room", "= message[1:] except: self.room = \"\" # lobby message =", "= [\"randombattle\", \"ou\", \"ubers\", \"uu\", \"ru\", \"nu\", \"pu\", \"lc\", \"anythinggoes\",", "= server self.admins = admins self.rooms = rooms self.symbol =", "= admins self.rooms = rooms self.symbol = symbol self.battles =", "self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self): self.ws = None sys.exit() def", "self.start_time = float(time.time()) self.commands = [] self.last_message = {} self.i", "for i in self.battles if 
i.room == self.room][0] def battle(self,", "= message[4].split(\"{} \".format(cmd))[1] else: args = [] command = getattr(commands,", "== self.room][0] def battle(self, message): message[1] = re.sub(r'[^A-z0-9]', '', message[1])", "organize imports # organize from websocket import create_connection from threading", "= symbol self.battles = [] self.plugins = plugins self.rooms_joined =", "= { \"act\": \"login\", \"name\": self.username, \"pass\": self.password, \"challengekeyid\": key,", "except: print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room)) def login(self, message): key =", "message): message[1] = re.sub(r'[^A-z0-9]', '', message[1]) if message[1] == \"turn\"", "= Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def connect(self): self.ws =", "plugins self.rooms_joined = [] self.log = log self.tiers = [\"randombattle\",", "= \"\" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def battle_message(self, messages): user", "message[2] challenge = message[3] if self.password == \"\": data =", "= { \"act\": \"getassertion\", \"userid\": self.username, \"challengekeyid\": key, \"challenge\": challenge", "__str__(self): return \"<Bot:{}>\".format(self.username) def join(self, room): self.ws.send(\"|/join {}\".format(room)) def current_battle(self):", "json from fractions import Fraction import random import time import", "data = requests.post(self.url, data=data) data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"]))", "= None sys.exit() def start(self): try: self.connect() except SystemExit: return", "== \"c\": self.battle_message(message) elif message[1] == \"challstr\": self.login(message) elif message[1]", "> 1: if message[1] == \"c:\": self.message(message) self.last_message[self.room] = message", "import time import sys import re import os from learn", "in self.battles if self.room in str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1)", "\"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self, username, password,", "in [x.room for x in self.battles] and len(message) > 1:", "raw(self, messages): if self.rooms[self.i] not in self.rooms_joined and \"infobox\" in", "== \"updateuser\": if not self.joined_all_rooms: for room in self.rooms: self.join(room)", "in self.rooms: self.join(room) self.joined_all_rooms = True elif message[1] == \"request\":", "self.ws.send(\"{}|Something went wrong.\".format(room)) def login(self, message): key = message[2] challenge", "getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room, command)) except (IndexError,", "{},0,{}\".format(self.username, data.text)) else: data = { \"act\": \"login\", \"name\": self.username,", "print(message.encode(\"utf-8\", \"ignore\")) try: if \">\" in self.last_message: self.room = message[1:]", "if x == match_line[0]][0] if self.room == \"lobby\": self.room =", "True: messages = [x for x in self.ws.recv().split(\"\\n\")] for message", "{} self.i = 0 self.url = \"http://play.pokemonshowdown.com/action.php\" self.room = \"\"", "create_connection from threading import Thread from battle import Battle import", "= message elif message[1] == \"title\": room = re.sub(r' ',", "== \"turn\" 
or message[1] == \"start\": getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room],", "plugin(self, room, plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def command(self, message,", "a mystery command! (\\\"{}\\\" is not recognized)\".format(room, cmd)) except: print(traceback.print_exc())", "+ 1: self.i += 1 def update(self): [self.join(room) for room", "x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line) >", "self.ws.recv().split(\"\\n\")] for message in messages: print(\"it is \") print(self.rooms_joined) if", "= json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) > 0: who = list(data[\"challengesFrom\"].keys())[0] tier", "\"pass\": self.password, \"challengekeyid\": key, \"challenge\": challenge } data = requests.post(self.url,", "= re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0] == self.symbol: messages =", "= getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room, command)) except", "self.log: print(message.encode(\"utf-8\", \"ignore\")) try: if \">\" in self.last_message: self.room =", "message[4].split(self.symbol)[1].split(\" \")[0] try: if \" \" in message[4]: args =", "in self.rooms_joined and \"infobox\" in messages[2]: if self.rooms[self.i] == \"lobby\":", "> 0: who = list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who] if tier", "room = re.sub(r' ', '', message[2].lower()) self.rooms_joined.append(room) elif message[1] ==", "\"turn\" or message[1] == \"start\": getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message)", "\"title\": room = re.sub(r' ', '', message[2].lower()) self.rooms_joined.append(room) elif message[1]", "symbol self.battles = [] self.plugins = plugins self.rooms_joined = []", "\"ru\", \"nu\", \"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"] def __str__(self): return \"<Bot:{}>\".format(self.username)", "username, password, server, admins, rooms, symbol, avatar, plugins, log): self.start_time", "= json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self): self.ws = None", "def current_battle(self): return [i for i in self.battles if i.room", "self.battles if i.room == self.room][0] battle.run(message) if len(message) > 1:", "disconnect(self): self.ws = None sys.exit() def start(self): try: self.connect() except", "list has five elements. 
self.commands.append(Thread(target=self.command, args=(messages, self.room, \" \" +", "pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self, username, password, server,", "admins self.rooms = rooms self.symbol = symbol self.battles = []", "self.battles if i.room == self.room][0] def battle(self, message): message[1] =", "self.room in [x.room for x in self.battles] and len(message) >", "rooms self.symbol = symbol self.battles = [] self.plugins = plugins", "room in self.rooms] def request(self, messages): data = [x for", "if len(message) > 1: if message[1] == \"c:\": self.message(message) self.last_message[self.room]", "\") print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\", \"ignore\")) try: if \">\" in", "= data[\"challengesFrom\"][who] if tier in self.tiers: if \"random\" not in", "message[1] == \"c:\": self.message(message) self.last_message[self.room] = message elif message[1] ==", "def battle_message(self, messages): user = re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0]", "went wrong.\".format(room)) def login(self, message): key = message[2] challenge =", "\"challengekeyid\": key, \"challenge\": challenge } data = requests.get(self.url, data=data) self.ws.send(\"|/trn", "[] command = getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room,", "in self.rooms_joined and messages[4][0] == self.symbol: if self.room == \"lobby\":", "x in self.plugins if x == match_line[0]][0] if self.room ==", "room in self.rooms: self.join(room) self.joined_all_rooms = True elif message[1] ==", "else: data = { \"act\": \"login\", \"name\": self.username, \"pass\": self.password,", "def raw(self, messages): if self.rooms[self.i] not in self.rooms_joined and \"infobox\"", "import traceback import requests import inspect import json from fractions", "= [] command = getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self)", "data[\"challengesFrom\"][who] if tier in self.tiers: if \"random\" not in tier:", "[] self.last_message = {} self.i = 0 self.url = \"http://play.pokemonshowdown.com/action.php\"", "\"\" self.username = username self.password = password self.joined_all_rooms = False", "message(self, messages): timestamp = int(messages[2]) user = messages[3] print(self.room) print(self.rooms_joined)", "if self.room in str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data)", "def battle(self, message): message[1] = re.sub(r'[^A-z0-9]', '', message[1]) if message[1]", "battle import Battle import commands import traceback import requests import", "= 0 self.url = \"http://play.pokemonshowdown.com/action.php\" self.room = \"\" self.username =", "in self.rooms] def request(self, messages): data = [x for x", "x in self.battles if self.room in str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\",", "len(match_line) > 0 and self.room in self.rooms_joined: plugin = [x", "def connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages = [x", "def update_battle(self, messages): data = json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) > 0:", "from battle import Battle import commands import traceback import requests", "0 and self.room in self.rooms_joined: plugin = [x for x", "plugin.run(message, self.last_message[self.room]))) def command(self, message, room, user): cmd = 
message[4].split(self.symbol)[1].split(\"", "has five elements. self.commands.append(Thread(target=self.command, args=(messages, self.room, \" \" + user)).start())", "\"<Bot:{}>\".format(self.username) def join(self, room): self.ws.send(\"|/join {}\".format(room)) def current_battle(self): return [i", "= avatar self.server = server self.admins = admins self.rooms =", "in message[4]: args = message[4].split(\"{} \".format(cmd))[1] else: args = []", "json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self, username, password, server, admins, rooms,", "self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i + 1: self.i += 1", "self.i = 0 self.url = \"http://play.pokemonshowdown.com/action.php\" self.room = \"\" self.username", "timestamp = int(messages[2]) user = messages[3] print(self.room) print(self.rooms_joined) match_line =", "for x in self.ws.recv().split(\"\\n\")] for message in messages: print(\"it is", "float(time.time()) self.commands = [] self.last_message = {} self.i = 0", "for x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line)", "args=(self.room, plugin, messages)).start()) if self.room in self.rooms_joined and messages[4][0] ==", "fractions import Fraction import random import time import sys import", "== \"raw\": self.raw(message) elif message[1] == \"c\": self.battle_message(message) elif message[1]", "self.room in self.rooms_joined and messages[4][0] == self.symbol: if self.room ==", "self.rooms] def request(self, messages): data = [x for x in", "self.join(room) self.joined_all_rooms = True elif message[1] == \"request\": self.request(message) elif", "if self.log: print(message.encode(\"utf-8\", \"ignore\")) try: if \">\" in self.last_message: self.room", "= message[4].split(self.symbol)[1].split(\" \")[0] try: if \" \" in message[4]: args", "list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who] if tier in self.tiers: if \"random\"", "[x for x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if", "now the list has five elements. self.commands.append(Thread(target=self.command, args=(messages, self.room, \"", "message[1] == \"request\": self.request(message) elif message[1] == \"updatechallenges\": self.update_battle(message) else:", "\"lobby\": self.rooms[self.i] = \"\" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i +", "\"ignore\")) try: if \">\" in self.last_message: self.room = message[1:] except:", "self.rooms_joined: plugin = [x for x in self.plugins if x", "== \"start\": getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room,", "it's a mystery command! 
(\\\"{}\\\" is not recognized)\".format(room, cmd)) except:", "if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line) > 0 and self.room", "self.last_message[self.room] = message elif message[1] == \"title\": room = re.sub(r'", "sys import re import os from learn import Learn class", "self.room, self)) print(\"NEW BATTLE\") self.battles[-1].run(messages) else: pass def update_battle(self, messages):", "update(self): [self.join(room) for room in self.rooms] def request(self, messages): data", "battle(self, message): message[1] = re.sub(r'[^A-z0-9]', '', message[1]) if message[1] ==", "self.room = message[1:] except: self.room = \"\" # lobby message", "user)).start()) def battle_message(self, messages): user = re.sub(r'[^A-z0-9]', '', messages[2]) if", "= message.split(\"|\") # battles if self.room in [x.room for x", "def plugin(self, room, plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def command(self,", "0 self.url = \"http://play.pokemonshowdown.com/action.php\" self.room = \"\" self.username = username", "messages): user = re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0] == self.symbol:", "import Thread from battle import Battle import commands import traceback", "message[1] == \"turn\" or message[1] == \"start\": getattr(self.current_battle()[self.room], \"decide\")() else:", "+= 1 def update(self): [self.join(room) for room in self.rooms] def", "import create_connection from threading import Thread from battle import Battle", "# organize imports # organize from websocket import create_connection from", "messages)).start()) if self.room in self.rooms_joined and messages[4][0] == self.symbol: if", "if self.rooms[self.i] not in self.rooms_joined and \"infobox\" in messages[2]: if", "if messages[3][0] == self.symbol: messages = [\"\"] + messages #", "[x for x in self.battles if self.room in str(x)] battle_tier", "if self.room in self.rooms_joined and messages[4][0] == self.symbol: if self.room", "team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def connect(self): self.ws", "message): key = message[2] challenge = message[3] if self.password ==", "self.room = \"\" # lobby message = message.split(\"|\") # battles", "traceback import requests import inspect import json from fractions import", "who = list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who] if tier in self.tiers:", "len(data[\"challengesFrom\"].keys()) > 0: who = list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who] if", "== \"\": data = { \"act\": \"getassertion\", \"userid\": self.username, \"challengekeyid\":", "self.joined_all_rooms = True elif message[1] == \"request\": self.request(message) elif message[1]", "\"challenge\": challenge } data = requests.post(self.url, data=data) data = json.loads(data.text.split(\"]\")[1])", "if not self.joined_all_rooms: for room in self.rooms: self.join(room) self.joined_all_rooms =", "password, server, admins, rooms, symbol, avatar, plugins, log): self.start_time =", "in self.rooms_joined: plugin = [x for x in self.plugins if", "self.commands = [] self.last_message = {} self.i = 0 self.url", "len(message) > 1: if message[1] == \"c:\": self.message(message) self.last_message[self.room] =", "try: self.connect() except SystemExit: return sys.exit() def message(self, messages): timestamp", "\"battlespotsingles\"] def __str__(self): return 
\"<Bot:{}>\".format(self.username) def join(self, room): self.ws.send(\"|/join {}\".format(room))", "def join(self, room): self.ws.send(\"|/join {}\".format(room)) def current_battle(self): return [i for", "self.room][0] battle.run(message) if len(message) > 1: if message[1] == \"c:\":", "= [x for x in self.plugins if x == match_line[0]][0]", "message[1] == \"updateuser\": if not self.joined_all_rooms: for room in self.rooms:", "= \"\" self.username = username self.password = password self.joined_all_rooms =", "command = getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room, command))", "print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room)) def login(self, message): key = message[2]", "command! (\\\"{}\\\" is not recognized)\".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something went", "messages): if self.rooms[self.i] not in self.rooms_joined and \"infobox\" in messages[2]:", "{},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self): self.ws = None sys.exit() def start(self):", "random import time import sys import re import os from", "\"nu\", \"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"] def __str__(self): return \"<Bot:{}>\".format(self.username) def", "self.joined_all_rooms: for room in self.rooms: self.join(room) self.joined_all_rooms = True elif", "= list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who] if tier in self.tiers: if", "organize from websocket import create_connection from threading import Thread from", "\"anythinggoes\", \"battlespotsingles\"] def __str__(self): return \"<Bot:{}>\".format(self.username) def join(self, room): self.ws.send(\"|/join", "\"\" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def battle_message(self, messages): user =", "(IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's a mystery command! 
(\\\"{}\\\"", "[x for x in self.ws.recv().split(\"\\n\")] for message in messages: print(\"it", "\"c\": self.battle_message(message) elif message[1] == \"challstr\": self.login(message) elif message[1] ==", "= [] self.plugins = plugins self.rooms_joined = [] self.log =", "and \"infobox\" in messages[2]: if self.rooms[self.i] == \"lobby\": self.rooms[self.i] =", "for x in self.plugins if x == match_line[0]][0] if self.room", "re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0] == self.symbol: messages = [\"\"]", "1: if message[1] == \"c:\": self.message(message) self.last_message[self.room] = message elif", "while True: messages = [x for x in self.ws.recv().split(\"\\n\")] for", "plugin = [x for x in self.plugins if x ==", "\"r\").read()) def __init__(self, username, password, server, admins, rooms, symbol, avatar,", "elif message[1] == \"raw\": self.raw(message) elif message[1] == \"c\": self.battle_message(message)", "match_line[0]][0] if self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.plugin, args=(self.room,", "= \"\" # lobby message = message.split(\"|\") # battles if", "== \"challstr\": self.login(message) elif message[1] == \"updateuser\": if not self.joined_all_rooms:", "in self.battles] and len(message) > 1: battle = [i for", "= re.sub(r'[^A-z0-9]', '', message[1]) if message[1] == \"turn\" or message[1]", "cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room)) def login(self, message): key", "False self.avatar = avatar self.server = server self.admins = admins", "self.symbol: if self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.command, args=(messages,", "\"random\" not in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept", "= [] self.last_message = {} self.i = 0 self.url =", "import Learn class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read()) pokemon_teams", "and len(message) > 1: battle = [i for i in", "self.room = \"\" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def battle_message(self, messages):", "battle_message(self, messages): user = re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0] ==", "message[4].split(\"{} \".format(cmd))[1] else: args = [] command = getattr(commands, \"command_{}\".format(cmd),", "create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages = [x for x in self.ws.recv().split(\"\\n\")]", "= [x for x in self.battles if self.room in str(x)]", "so it's a mystery command! 
(\\\"{}\\\" is not recognized)\".format(room, cmd))", "def login(self, message): key = message[2] challenge = message[3] if", "key, \"challenge\": challenge } data = requests.post(self.url, data=data) data =", "import random import time import sys import re import os", "else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room, plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message,", "len(self.rooms) > self.i + 1: self.i += 1 def update(self):", "username self.password = password self.joined_all_rooms = False self.avatar = avatar", "except: self.room = \"\" # lobby message = message.split(\"|\") #", "len(message) > 1: battle = [i for i in self.battles", "password self.joined_all_rooms = False self.avatar = avatar self.server = server", "not recognized)\".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room)) def login(self,", "challenge } data = requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else:", "args=(messages, self.room, \" \" + user)).start()) def raw(self, messages): if", "server, admins, rooms, symbol, avatar, plugins, log): self.start_time = float(time.time())", "self.ws.send(\"|/accept {}\".format(who)) def connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages", "import Fraction import random import time import sys import re", "messages: print(\"it is \") print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\", \"ignore\")) try:", "for room in self.rooms] def request(self, messages): data = [x", "messages # now the list has five elements. self.commands.append(Thread(target=self.command, args=(messages,", "log): self.start_time = float(time.time()) self.commands = [] self.last_message = {}", "= [] self.log = log self.tiers = [\"randombattle\", \"ou\", \"ubers\",", "== \"title\": room = re.sub(r' ', '', message[2].lower()) self.rooms_joined.append(room) elif", "\".format(cmd))[1] else: args = [] command = getattr(commands, \"command_{}\".format(cmd), __name__)(args,", "print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's a mystery command! (\\\"{}\\\" is not", "imports # organize from websocket import create_connection from threading import", "\"./data/pokedex.json\"), \"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self, username,", "== 0: # new battle self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW BATTLE\")", "Battle import commands import traceback import requests import inspect import", "inspect import json from fractions import Fraction import random import", "messages[4][0] == self.symbol: if self.room == \"lobby\": self.room = \"\"", "\" + user)).start()) def raw(self, messages): if self.rooms[self.i] not in", "+ messages # now the list has five elements. 
self.commands.append(Thread(target=self.command,", "self.room).group(1) if len(data) == 0: # new battle self.battles.append(Battle(battle_tier, self.room,", "try: if \">\" in self.last_message: self.room = message[1:] except: self.room", "re import os from learn import Learn class Bot: pokedex", "start(self): try: self.connect() except SystemExit: return sys.exit() def message(self, messages):", "return [i for i in self.battles if i.room == self.room][0]", "elif message[1] == \"request\": self.request(message) elif message[1] == \"updatechallenges\": self.update_battle(message)", "# TODO: # organize imports # organize from websocket import", "self.last_message: self.room = message[1:] except: self.room = \"\" # lobby", "= re.sub(r' ', '', message[2].lower()) self.rooms_joined.append(room) elif message[1] == \"raw\":", "self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else: data = { \"act\": \"login\", \"name\":", "is not recognized)\".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room)) def", "login(self, message): key = message[2] challenge = message[3] if self.password", "self.joined_all_rooms = False self.avatar = avatar self.server = server self.admins", "self.battles[-1].run(messages) else: pass def update_battle(self, messages): data = json.loads(messages[2]) if", "[x.room for x in self.battles] and len(message) > 1: battle", "x in self.ws.recv().split(\"\\n\")] for message in messages: print(\"it is \")", "command(self, message, room, user): cmd = message[4].split(self.symbol)[1].split(\" \")[0] try: if", "self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages = [x for x", "flags=re.IGNORECASE)] if len(match_line) > 0 and self.room in self.rooms_joined: plugin", "self.plugins if x == match_line[0]][0] if self.room == \"lobby\": self.room", "data=data) data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self): self.ws", "__name__)(args, room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room, command)) except (IndexError, TypeError): print(traceback.print_exc())", "1: battle = [i for i in self.battles if i.room", "x == match_line[0]][0] if self.room == \"lobby\": self.room = \"\"", "[i for i in self.battles if i.room == self.room][0] battle.run(message)", "SystemExit: return sys.exit() def message(self, messages): timestamp = int(messages[2]) user", "\"c:\": self.message(message) self.last_message[self.room] = message elif message[1] == \"title\": room", "message[1]) if message[1] == \"turn\" or message[1] == \"start\": getattr(self.current_battle()[self.room],", "request(self, messages): data = [x for x in self.battles if", "Fraction import random import time import sys import re import", "if self.password == \"\": data = { \"act\": \"getassertion\", \"userid\":", "learn import Learn class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read())", "import Battle import commands import traceback import requests import inspect", "message[1] == \"c\": self.battle_message(message) elif message[1] == \"challstr\": self.login(message) elif", "admins, rooms, symbol, avatar, plugins, log): self.start_time = float(time.time()) self.commands", "Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def 
connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server))", "for x in self.battles] and len(message) > 1: battle =", "= requests.post(self.url, data=data) data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def", "== \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def", "if \">\" in self.last_message: self.room = message[1:] except: self.room =", "if len(data[\"challengesFrom\"].keys()) > 0: who = list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who]", "not in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who))", "= [i for i in self.battles if i.room == self.room][0]", "self.username = username self.password = password self.joined_all_rooms = False self.avatar", "print(\"it is \") print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\", \"ignore\")) try: if", "\"lobby\": self.room = \"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if self.room", "== match_line[0]][0] if self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.plugin,", "not in self.rooms_joined and \"infobox\" in messages[2]: if self.rooms[self.i] ==", "self.password = password self.joined_all_rooms = False self.avatar = avatar self.server", "messages): timestamp = int(messages[2]) user = messages[3] print(self.room) print(self.rooms_joined) match_line", "{ \"act\": \"login\", \"name\": self.username, \"pass\": self.password, \"challengekeyid\": key, \"challenge\":", "i.room == self.room][0] def battle(self, message): message[1] = re.sub(r'[^A-z0-9]', '',", "messages[2]: if self.rooms[self.i] == \"lobby\": self.rooms[self.i] = \"\" self.rooms_joined.append(self.rooms[self.i]) if", "<gh_stars>0 # bot.py # TODO: # organize imports # organize", "{}\".format(room)) def current_battle(self): return [i for i in self.battles if", "def message(self, messages): timestamp = int(messages[2]) user = messages[3] print(self.room)", "def update(self): [self.join(room) for room in self.rooms] def request(self, messages):", "battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data) == 0: # new", "# lobby message = message.split(\"|\") # battles if self.room in", "self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def command(self, message, room, user): cmd =", "\"userid\": self.username, \"challengekeyid\": key, \"challenge\": challenge } data = requests.get(self.url,", "time import sys import re import os from learn import", "challenge } data = requests.post(self.url, data=data) data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn", "args=(messages, self.room, user)).start()) def battle_message(self, messages): user = re.sub(r'[^A-z0-9]', '',", "elif message[1] == \"title\": room = re.sub(r' ', '', message[2].lower())", "messages[4], flags=re.IGNORECASE)] if len(match_line) > 0 and self.room in self.rooms_joined:", "mystery command! 
(\\\"{}\\\" is not recognized)\".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something", "0: who = list(data[\"challengesFrom\"].keys())[0] tier = data[\"challengesFrom\"][who] if tier in", "# bot.py # TODO: # organize imports # organize from", "= json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self, username, password, server, admins,", "key = message[2] challenge = message[3] if self.password == \"\":", "elements. self.commands.append(Thread(target=self.command, args=(messages, self.room, \" \" + user)).start()) def raw(self,", "= float(time.time()) self.commands = [] self.last_message = {} self.i =", "requests.post(self.url, data=data) data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self):", "== self.symbol: if self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.command,", "message[1])(message) def plugin(self, room, plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def", "try: if \" \" in message[4]: args = message[4].split(\"{} \".format(cmd))[1]", "= False self.avatar = avatar self.server = server self.admins =", "data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self): self.ws =", "(\\\"{}\\\" is not recognized)\".format(room, cmd)) except: print(traceback.print_exc()) self.ws.send(\"{}|Something went wrong.\".format(room))", "= [x for x in self.ws.recv().split(\"\\n\")] for message in messages:", "> 1: battle = [i for i in self.battles if", "avatar, plugins, log): self.start_time = float(time.time()) self.commands = [] self.last_message", "messages): data = json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) > 0: who =", "in self.tiers: if \"random\" not in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))]", "= int(messages[2]) user = messages[3] print(self.room) print(self.rooms_joined) match_line = [x", "import commands import traceback import requests import inspect import json", "[] self.plugins = plugins self.rooms_joined = [] self.log = log", "self.room in str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data) ==", "not self.joined_all_rooms: for room in self.rooms: self.join(room) self.joined_all_rooms = True", "threading import Thread from battle import Battle import commands import", "\"uu\", \"ru\", \"nu\", \"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"] def __str__(self): return", "x in self.battles] and len(message) > 1: battle = [i", "TODO: # organize imports # organize from websocket import create_connection", "\">\" in self.last_message: self.room = message[1:] except: self.room = \"\"", "data.text)) else: data = { \"act\": \"login\", \"name\": self.username, \"pass\":", "if len(self.rooms) > self.i + 1: self.i += 1 def", "data = [x for x in self.battles if self.room in", "def command(self, message, room, user): cmd = message[4].split(self.symbol)[1].split(\" \")[0] try:", "websocket import create_connection from threading import Thread from battle import", "\")[0] try: if \" \" in message[4]: args = message[4].split(\"{}", "message in messages: print(\"it is \") print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\",", "pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), 
\"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read())", "message, room, user): cmd = message[4].split(self.symbol)[1].split(\" \")[0] try: if \"", "self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start())", "self.rooms[self.i] == \"lobby\": self.rooms[self.i] = \"\" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) >", "json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username, data[\"assertion\"])) def disconnect(self): self.ws = None sys.exit()", "user.lower(), self) self.ws.send(\"{}|{}\".format(room, command)) except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so", "message[1] == \"start\": getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self,", "else: pass def update_battle(self, messages): data = json.loads(messages[2]) if len(data[\"challengesFrom\"].keys())", "self.server = server self.admins = admins self.rooms = rooms self.symbol", "i.room == self.room][0] battle.run(message) if len(message) > 1: if message[1]", "in self.battles if i.room == self.room][0] def battle(self, message): message[1]", "for room in self.rooms: self.join(room) self.joined_all_rooms = True elif message[1]", "connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages = [x for", "getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room, plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room])))", "and messages[4][0] == self.symbol: if self.room == \"lobby\": self.room =", "\"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if self.room in self.rooms_joined and", "# organize from websocket import create_connection from threading import Thread", "messages[3] print(self.room) print(self.rooms_joined) match_line = [x for x in self.plugins", "= password self.joined_all_rooms = False self.avatar = avatar self.server =", "if \" \" in message[4]: args = message[4].split(\"{} \".format(cmd))[1] else:", "= [x for x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)]", "\"\": data = { \"act\": \"getassertion\", \"userid\": self.username, \"challengekeyid\": key,", "in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line) > 0", "\"infobox\" in messages[2]: if self.rooms[self.i] == \"lobby\": self.rooms[self.i] = \"\"", "rooms, symbol, avatar, plugins, log): self.start_time = float(time.time()) self.commands =", "= plugins self.rooms_joined = [] self.log = log self.tiers =", "[\"randombattle\", \"ou\", \"ubers\", \"uu\", \"ru\", \"nu\", \"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"]", "self.last_message[self.room]))) def command(self, message, room, user): cmd = message[4].split(self.symbol)[1].split(\" \")[0]", "\"updateuser\": if not self.joined_all_rooms: for room in self.rooms: self.join(room) self.joined_all_rooms", "self.ws.send(\"{}|Luffy: so it's a mystery command! 
(\\\"{}\\\" is not recognized)\".format(room,", "\"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room, command)) except (IndexError, TypeError):", "plugin, messages)).start()) if self.room in self.rooms_joined and messages[4][0] == self.symbol:", "self.battles if self.room in str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if", "self.rooms: self.join(room) self.joined_all_rooms = True elif message[1] == \"request\": self.request(message)", "cmd = message[4].split(self.symbol)[1].split(\" \")[0] try: if \" \" in message[4]:", "= \"\" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i + 1: self.i", "in self.plugins if x == match_line[0]][0] if self.room == \"lobby\":", "sys.exit() def start(self): try: self.connect() except SystemExit: return sys.exit() def", "import re import os from learn import Learn class Bot:", "[self.join(room) for room in self.rooms] def request(self, messages): data =", "self.symbol = symbol self.battles = [] self.plugins = plugins self.rooms_joined", "in self.battles if i.room == self.room][0] battle.run(message) if len(message) >", "\"\" # lobby message = message.split(\"|\") # battles if self.room", "self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)] if len(match_line) > 0 and", "import json from fractions import Fraction import random import time", "elif message[1] == \"challstr\": self.login(message) elif message[1] == \"updateuser\": if", "\" \" + user)).start()) def raw(self, messages): if self.rooms[self.i] not", "from threading import Thread from battle import Battle import commands", "def request(self, messages): data = [x for x in self.battles", "if len(data) == 0: # new battle self.battles.append(Battle(battle_tier, self.room, self))", "room, user): cmd = message[4].split(self.symbol)[1].split(\" \")[0] try: if \" \"", "messages = [x for x in self.ws.recv().split(\"\\n\")] for message in", "import os from learn import Learn class Bot: pokedex =", "self.rooms[self.i] = \"\" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i + 1:", "if tier in self.tiers: if \"random\" not in tier: team", "== \"request\": self.request(message) elif message[1] == \"updatechallenges\": self.update_battle(message) else: pass", "json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self,", "} data = requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else: data", "> self.i + 1: self.i += 1 def update(self): [self.join(room)", "message[1] == \"title\": room = re.sub(r' ', '', message[2].lower()) self.rooms_joined.append(room)", "log self.tiers = [\"randombattle\", \"ou\", \"ubers\", \"uu\", \"ru\", \"nu\", \"pu\",", "= \"http://play.pokemonshowdown.com/action.php\" self.room = \"\" self.username = username self.password =", "and self.room in self.rooms_joined: plugin = [x for x in", "== \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if", "+ user)).start()) def raw(self, messages): if self.rooms[self.i] not in self.rooms_joined", "{ \"act\": \"getassertion\", \"userid\": self.username, \"challengekeyid\": key, \"challenge\": challenge }", "in messages[2]: if self.rooms[self.i] == \"lobby\": self.rooms[self.i] = \"\" 
self.rooms_joined.append(self.rooms[self.i])", "self.ws = None sys.exit() def start(self): try: self.connect() except SystemExit:", "self.i + 1: self.i += 1 def update(self): [self.join(room) for", "for message in messages: print(\"it is \") print(self.rooms_joined) if self.log:", "TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's a mystery command! (\\\"{}\\\" is", "symbol, avatar, plugins, log): self.start_time = float(time.time()) self.commands = []", "room.strip().lower(), user.lower(), self) self.ws.send(\"{}|{}\".format(room, command)) except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy:", "data = { \"act\": \"login\", \"name\": self.username, \"pass\": self.password, \"challengekeyid\":", "import inspect import json from fractions import Fraction import random", "return \"<Bot:{}>\".format(self.username) def join(self, room): self.ws.send(\"|/join {}\".format(room)) def current_battle(self): return", "getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room, plugin, message):", "bot.py # TODO: # organize imports # organize from websocket", "re.sub(r' ', '', message[2].lower()) self.rooms_joined.append(room) elif message[1] == \"raw\": self.raw(message)", "== \"c:\": self.message(message) self.last_message[self.room] = message elif message[1] == \"title\":", "plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def command(self, message, room, user):", "self.rooms = rooms self.symbol = symbol self.battles = [] self.plugins", "= log self.tiers = [\"randombattle\", \"ou\", \"ubers\", \"uu\", \"ru\", \"nu\",", "True elif message[1] == \"request\": self.request(message) elif message[1] == \"updatechallenges\":", "self.url = \"http://play.pokemonshowdown.com/action.php\" self.room = \"\" self.username = username self.password", "self.rooms_joined and \"infobox\" in messages[2]: if self.rooms[self.i] == \"lobby\": self.rooms[self.i]", "[i for i in self.battles if i.room == self.room][0] def", "[x for x in self.plugins if x == match_line[0]][0] if", "in self.last_message: self.room = message[1:] except: self.room = \"\" #", "message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def command(self, message, room, user): cmd", "battle = [i for i in self.battles if i.room ==", "args = [] command = getattr(commands, \"command_{}\".format(cmd), __name__)(args, room.strip().lower(), user.lower(),", "self.room = \"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if self.room in", "None sys.exit() def start(self): try: self.connect() except SystemExit: return sys.exit()", "messages): data = [x for x in self.battles if self.room", "def disconnect(self): self.ws = None sys.exit() def start(self): try: self.connect()", "message[3] if self.password == \"\": data = { \"act\": \"getassertion\",", "# now the list has five elements. 
self.commands.append(Thread(target=self.command, args=(messages, self.room,", "lobby message = message.split(\"|\") # battles if self.room in [x.room", "self.message(message) self.last_message[self.room] = message elif message[1] == \"title\": room =", "i in self.battles if i.room == self.room][0] battle.run(message) if len(message)", "in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def", "import sys import re import os from learn import Learn", "== self.room][0] battle.run(message) if len(message) > 1: if message[1] ==", "self.log = log self.tiers = [\"randombattle\", \"ou\", \"ubers\", \"uu\", \"ru\",", "self) self.ws.send(\"{}|{}\".format(room, command)) except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's", "self.rooms_joined and messages[4][0] == self.symbol: if self.room == \"lobby\": self.room", "requests import inspect import json from fractions import Fraction import", "[] self.log = log self.tiers = [\"randombattle\", \"ou\", \"ubers\", \"uu\",", "join(self, room): self.ws.send(\"|/join {}\".format(room)) def current_battle(self): return [i for i", "if message[1] == \"c:\": self.message(message) self.last_message[self.room] = message elif message[1]", "self)) print(\"NEW BATTLE\") self.battles[-1].run(messages) else: pass def update_battle(self, messages): data", "= re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data) == 0: # new battle", "self.password, \"challengekeyid\": key, \"challenge\": challenge } data = requests.post(self.url, data=data)", "= messages[3] print(self.room) print(self.rooms_joined) match_line = [x for x in", "messages = [\"\"] + messages # now the list has", "from learn import Learn class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"),", "if self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin,", "{}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True:", "return sys.exit() def message(self, messages): timestamp = int(messages[2]) user =", "1: self.i += 1 def update(self): [self.join(room) for room in", "', '', message[2].lower()) self.rooms_joined.append(room) elif message[1] == \"raw\": self.raw(message) elif", "\"http://play.pokemonshowdown.com/action.php\" self.room = \"\" self.username = username self.password = password", "if self.room in [x.room for x in self.battles] and len(message)", "self.ws.send(\"{}|{}\".format(room, command)) except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's a", "battles if self.room in [x.room for x in self.battles] and", "os from learn import Learn class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__),", "key, \"challenge\": challenge } data = requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username,", "data = { \"act\": \"getassertion\", \"userid\": self.username, \"challengekeyid\": key, \"challenge\":", "self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start())", "message.split(\"|\") # battles if self.room in [x.room for x in", "data = json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) > 0: who = list(data[\"challengesFrom\"].keys())[0]", 
"Learn class Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read()) pokemon_teams =", "\"./data/pokemon_teams.json\"), \"r\").read()) def __init__(self, username, password, server, admins, rooms, symbol,", "for i in self.battles if i.room == self.room][0] battle.run(message) if", "message = message.split(\"|\") # battles if self.room in [x.room for", "tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def connect(self):", "self.room = \"\" self.username = username self.password = password self.joined_all_rooms", "if \"random\" not in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm {}\".format(team))", "def start(self): try: self.connect() except SystemExit: return sys.exit() def message(self,", "print(self.rooms_joined) match_line = [x for x in self.plugins if re.match(x.match_line,", "self.username, \"challengekeyid\": key, \"challenge\": challenge } data = requests.get(self.url, data=data)", "\"login\", \"name\": self.username, \"pass\": self.password, \"challengekeyid\": key, \"challenge\": challenge }", "self.ws.send(\"|/join {}\".format(room)) def current_battle(self): return [i for i in self.battles", "message[1] = re.sub(r'[^A-z0-9]', '', message[1]) if message[1] == \"turn\" or", "{}\".format(who)) def connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages =", "\" in message[4]: args = message[4].split(\"{} \".format(cmd))[1] else: args =", "except SystemExit: return sys.exit() def message(self, messages): timestamp = int(messages[2])", "self.admins = admins self.rooms = rooms self.symbol = symbol self.battles", "battle self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW BATTLE\") self.battles[-1].run(messages) else: pass def", "in self.ws.recv().split(\"\\n\")] for message in messages: print(\"it is \") print(self.rooms_joined)", "from websocket import create_connection from threading import Thread from battle", "in messages: print(\"it is \") print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\", \"ignore\"))", "user = messages[3] print(self.room) print(self.rooms_joined) match_line = [x for x", "= {} self.i = 0 self.url = \"http://play.pokemonshowdown.com/action.php\" self.room =", "if self.room == \"lobby\": self.room = \"\" self.commands.append(Thread(target=self.command, args=(messages, self.room,", "self.room in self.rooms_joined: plugin = [x for x in self.plugins", "battle.run(message) if len(message) > 1: if message[1] == \"c:\": self.message(message)", "\"start\": getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message) def plugin(self, room, plugin,", "= True elif message[1] == \"request\": self.request(message) elif message[1] ==", "print(self.room) print(self.rooms_joined) match_line = [x for x in self.plugins if", "elif message[1] == \"updateuser\": if not self.joined_all_rooms: for room in", "1 def update(self): [self.join(room) for room in self.rooms] def request(self,", "re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data) == 0: # new battle self.battles.append(Battle(battle_tier,", "\"challengekeyid\": key, \"challenge\": challenge } data = requests.post(self.url, data=data) data", "if self.rooms[self.i] == \"lobby\": self.rooms[self.i] = \"\" 
self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms)", "self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW BATTLE\") self.battles[-1].run(messages) else: pass def update_battle(self,", "message[1] == \"challstr\": self.login(message) elif message[1] == \"updateuser\": if not", "\"ubers\", \"uu\", \"ru\", \"nu\", \"pu\", \"lc\", \"anythinggoes\", \"battlespotsingles\"] def __str__(self):", "except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's a mystery command!", "self.ws.send(\"|/utm {}\".format(team)) self.ws.send(\"|/accept {}\".format(who)) def connect(self): self.ws = create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while", "plugins, log): self.start_time = float(time.time()) self.commands = [] self.last_message =", "= create_connection(\"ws://{}/showdown/websocket\".format(self.server)) while True: messages = [x for x in", "self.login(message) elif message[1] == \"updateuser\": if not self.joined_all_rooms: for room", "sys.exit() def message(self, messages): timestamp = int(messages[2]) user = messages[3]", "self.room, \" \" + user)).start()) def raw(self, messages): if self.rooms[self.i]", "BATTLE\") self.battles[-1].run(messages) else: pass def update_battle(self, messages): data = json.loads(messages[2])", "re.sub(r'[^A-z0-9]', '', message[1]) if message[1] == \"turn\" or message[1] ==", "message[1] == \"raw\": self.raw(message) elif message[1] == \"c\": self.battle_message(message) elif", "= message[3] if self.password == \"\": data = { \"act\":", "user = re.sub(r'[^A-z0-9]', '', messages[2]) if messages[3][0] == self.symbol: messages", "messages[2]) if messages[3][0] == self.symbol: messages = [\"\"] + messages", "= \"\" self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if self.room in self.rooms_joined", "avatar self.server = server self.admins = admins self.rooms = rooms", "self.password == \"\": data = { \"act\": \"getassertion\", \"userid\": self.username,", "int(messages[2]) user = messages[3] print(self.room) print(self.rooms_joined) match_line = [x for", "room): self.ws.send(\"|/join {}\".format(room)) def current_battle(self): return [i for i in", "\" \" in message[4]: args = message[4].split(\"{} \".format(cmd))[1] else: args", "# new battle self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW BATTLE\") self.battles[-1].run(messages) else:", "new battle self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW BATTLE\") self.battles[-1].run(messages) else: pass", "print(self.rooms_joined) if self.log: print(message.encode(\"utf-8\", \"ignore\")) try: if \">\" in self.last_message:", "self.username, \"pass\": self.password, \"challengekeyid\": key, \"challenge\": challenge } data =", "self.plugins = plugins self.rooms_joined = [] self.log = log self.tiers", "room, plugin, message): self.ws.send(\"{}|{}\".format(room, plugin.run(message, self.last_message[self.room]))) def command(self, message, room,", "} data = requests.post(self.url, data=data) data = json.loads(data.text.split(\"]\")[1]) self.ws.send(\"|/trn {},0,{}\".format(self.username,", "Bot: pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokedex.json\"), \"r\").read()) pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), \"./data/pokemon_teams.json\"),", "tier = data[\"challengesFrom\"][who] if tier in self.tiers: if \"random\" not", "= message[2] challenge = message[3] if self.password == \"\": data", 
"[\"\"] + messages # now the list has five elements.", "self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def battle_message(self, messages): user = re.sub(r'[^A-z0-9]',", "import requests import inspect import json from fractions import Fraction", "self.room][0] def battle(self, message): message[1] = re.sub(r'[^A-z0-9]', '', message[1]) if", "\"act\": \"login\", \"name\": self.username, \"pass\": self.password, \"challengekeyid\": key, \"challenge\": challenge", "if i.room == self.room][0] battle.run(message) if len(message) > 1: if", "= rooms self.symbol = symbol self.battles = [] self.plugins =", "0: # new battle self.battles.append(Battle(battle_tier, self.room, self)) print(\"NEW BATTLE\") self.battles[-1].run(messages)", "tier in self.tiers: if \"random\" not in tier: team =", "__init__(self, username, password, server, admins, rooms, symbol, avatar, plugins, log):", "\"challenge\": challenge } data = requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text))", "for x in self.battles if self.room in str(x)] battle_tier =", "message[1:] except: self.room = \"\" # lobby message = message.split(\"|\")", "self.rooms[self.i] not in self.rooms_joined and \"infobox\" in messages[2]: if self.rooms[self.i]", "\"raw\": self.raw(message) elif message[1] == \"c\": self.battle_message(message) elif message[1] ==", "self.battles] and len(message) > 1: battle = [i for i", "from fractions import Fraction import random import time import sys", "# battles if self.room in [x.room for x in self.battles]", "\"\" self.rooms_joined.append(self.rooms[self.i]) if len(self.rooms) > self.i + 1: self.i +=", "requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else: data = { \"act\":", "self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start()) if self.room in self.rooms_joined and messages[4][0]", "json.loads(messages[2]) if len(data[\"challengesFrom\"].keys()) > 0: who = list(data[\"challengesFrom\"].keys())[0] tier =", "\"lobby\": self.room = \"\" self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start()) def battle_message(self,", "challenge = message[3] if self.password == \"\": data = {", "user)).start()) def raw(self, messages): if self.rooms[self.i] not in self.rooms_joined and", "self.i += 1 def update(self): [self.join(room) for room in self.rooms]", "self.tiers = [\"randombattle\", \"ou\", \"ubers\", \"uu\", \"ru\", \"nu\", \"pu\", \"lc\",", "str(x)] battle_tier = re.search(\"battle-(.+)-(\\d+)\", self.room).group(1) if len(data) == 0: #", "self.rooms_joined = [] self.log = log self.tiers = [\"randombattle\", \"ou\",", "if len(match_line) > 0 and self.room in self.rooms_joined: plugin =", "command)) except (IndexError, TypeError): print(traceback.print_exc()) self.ws.send(\"{}|Luffy: so it's a mystery", "self.commands.append(Thread(target=self.command, args=(messages, self.room, \" \" + user)).start()) def raw(self, messages):", "> 0 and self.room in self.rooms_joined: plugin = [x for", "commands import traceback import requests import inspect import json from", "= requests.get(self.url, data=data) self.ws.send(\"|/trn {},0,{}\".format(self.username, data.text)) else: data = {", "self.tiers: if \"random\" not in tier: team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))] self.ws.send(\"|/utm", "message elif message[1] == \"title\": room = re.sub(r' ', '',", "or message[1] == 
\"start\": getattr(self.current_battle()[self.room], \"decide\")() else: getattr(self.current_battle()[self.room], message[1])(message) def" ]
[ "if ElPiGraph_kwargs[\"GPU\"] is True: try: import cupy except ImportError: raise", "X = model.points if spatial_key is None else model[spatial_key] DDRTree_kwargs", "Complex Intrinsic Dataset Geometry via ElPiGraph. Args: X: DxN, data", "Obtain the real part of the complex argument model[key_added] =", "ImportError( \"You need to install the package `simpleppt` and `igraph`.\"", "DDRTree_kwargs.update(kwargs) Z, Y, stree, R, W, Q, C, objs =", "expression array. \"\"\" model = model.copy() model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"])", "point2 = line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length +=", "spatial_key: Optional[str] = None, key_added: Optional[str] = \"nodes\", inplace: bool", ") x, y = [], [] x_length = 0 for", "\"rd_spatial\", dim: int = 2, inplace: bool = False, **kwargs,", "True, \"GPU\": False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is True: try:", "Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py", "tree model in-place. Returns: A tree, which contains the following", "ImportError: raise ImportError( \"You need to install the package `cupy`.\"", "-> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices, line_points, line = three_d_slice(", "on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for sub_key in key: tree.point_data[sub_key] = tree_data[sub_key].values", "Union[str, list], nodes_key: Optional[str] = \"nodes\", inplace: bool = False,", "array. \"\"\" padding = np.empty(edges.shape[0], int) * 2 padding[:] =", "key = [key] if isinstance(key, str) else key for sub_key", "1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges def SimplePPT_tree(", "else None def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree: PolyData, key:", "elpigraph except ImportError: raise ImportError( \"You need to install the", "_, ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii return model", "= [key] if isinstance(key, str) else key for sub_key in", "the principal tree. key_added: The key under which to add", "nodes labels. Returns: A three-dims principal tree model, which contains", "method=\"line\", n_slices=n_points, vec=vec, center=center ) x, y = [], []", "range of 100 to 2000 for PPT approach. **kwargs: Other", ") map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model =", "the package `cupy`.\" \"\\nInstall cupy via `pip install cupy-cuda113`.\" )", "model.point_data[key_added] = ii return model if not inplace else None", "`'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, )", "edges between nodes in the principal tree. \"\"\" try: import", "the following properties: `tree.point_data[key]`, the gene expression array. \"\"\" model", ".ddrtree import DDRTree, cal_ncenter from .slice import euclidean_distance, three_d_slice ####################################", "coordinates are model.points. 
key_added: The key under which to add", "tree_model.point_data[key_added] = np.arange(0, len(nodes), 1) return tree_model def changes_along_branch( model:", "[] x_length = 0 for slice, (point_i, point) in zip(slices,", "= np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index model_data.index =", "for ElPiGraph approach. **kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For", "al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International", "model[spatial_key] if rd_method == \"ElPiGraph\": nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes,", "model. Args: nodes: The nodes in the principal tree. edges:", "\"ElPiGraph\": nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method ==", "are: `'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True,", ") -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a principal elastic tree.", "properties: `tree_model.point_data[key_added]`, the nodes labels array. \"\"\" padding = np.empty(edges.shape[0],", "Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices, line_points, line = three_d_slice( model=model,", "A simple principal tree algorithm, SIAM International Conference on Data", "simpleppt except ImportError: raise ImportError( \"You need to install the", "`pip install -U simpleppt`.\" \"\\nInstall igraph via `pip install -U", "MultiBlock]: slices, line_points, line = three_d_slice( model=model, method=\"line\", n_slices=n_points, vec=vec,", "inplace else model X = model.points if spatial_key is None", "np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R nodes", "SimplePPT: A simple principal tree algorithm, SIAM International Conference on", "the gene expression label. tree: A three-dims principal tree model", "the principal graph. Use a range of 10 to 100", "0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True, \"GPU\": False, } ElPiGraph_kwargs.update(kwargs) if", "corresponds to the gene expression. nodes_key: The key that corresponds", "[], [] x_length = 0 for slice, (point_i, point) in", "point_i == 0: x.append(0) else: point1 = line_points[point_i - 1].points.flatten()", "2000 for PPT approach. **kwargs: Other parameters used in simpleppt.ppt.", "Returns: A three-dims principal tree model, which contains the following", "\"nodes\", inplace: bool = False, ): \"\"\" Find the closest", "= np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges def SimplePPT_tree( X: np.ndarray,", "= False, **kwargs, ) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model", "closest principal tree node to any point in the model", "== \"ElPiGraph\": nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method", "None, ) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices, line_points, line", "= model.points if spatial_key is None else model[spatial_key] if rd_method", "to the coordinates of the point in the model. If", "np.empty(edges.shape[0], int) * 2 padding[:] = 2 edges_w_padding = np.vstack((padding,", "model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model = construct_tree_model(nodes=nodes, edges=edges)", "nodes in the principal tree. 
\"\"\" try: import elpigraph except", "Optional[str] = None, map_key: Union[str, list] = None, key_added: Optional[str]", "tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\")", "please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes in the principal", "the model through KDTree. Args: model: A reconstruct model contains", "approach. **kwargs: Other parameters used in simpleppt.ppt. For details, please", "\"GPU\": False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is True: try: import", "`simpleppt` and `igraph`.\" \"\\nInstall simpleppt via `pip install -U simpleppt`.\"", "UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str] = None, key_added: Optional[str] =", "pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid try: from typing import", "= ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist() )", "along a vector direction # #################################### def changes_along_line( model: Union[PolyData,", "(map_key is None): map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True )", "= model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index model_data.index = range(len(model_data.index)) tree =", "#################################### def changes_along_line( model: Union[PolyData, UnstructuredGrid], key: Union[str, list] =", "contains the following properties: `model.point_data[key_added]`, the nodes labels array. \"\"\"", "contains the following properties: `tree.point_data[key]`, the gene expression array. \"\"\"", "in-place. Returns: A tree, which contains the following properties: `tree.point_data[key]`,", "in the principal tree. \"\"\" try: import igraph import simpleppt", "the model. If spatial_key is None, the coordinates are model.points.", "the principal tree. edges: The edges between nodes in the", "n_points: int = 100, vec: Union[tuple, list] = (1, 0,", ".slice import euclidean_distance, three_d_slice #################################### # Changes along a vector", "install -U igraph`\" ) SimplePPT_kwargs = { \"seed\": 1, \"lam\":", "sub_key in key: tree.point_data[sub_key] = tree_data[sub_key].values return tree if not", "int = 50, inplace: bool = False, **kwargs, ) ->", "edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise ValueError( \"`rd_method` value", "np.asarray(x), np.asarray(y), slices, line ################################# # Changes along the model", "bool = False, **kwargs, ): \"\"\" Find the closest principal", "= model.points if spatial_key is None else model[spatial_key] nodes_kdtree =", "model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _, ii = nodes_kdtree.query(np.asarray(X), k=1)", "= model_data.index model_data.index = range(len(model_data.index)) tree = tree.copy() if not", "cal_ncenter from .slice import euclidean_distance, three_d_slice #################################### # Changes along", "igraph`\" ) SimplePPT_kwargs = { \"seed\": 1, \"lam\": 10, }", "inplace=True) for sub_key in key: tree.point_data[sub_key] = tree_data[sub_key].values return tree", "-U igraph`\" ) SimplePPT_kwargs = { \"seed\": 1, \"lam\": 10,", "-> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a principal elastic tree. 
Reference:", "NumNodes=NumNodes, **kwargs) else: raise ValueError( \"`rd_method` value is wrong.\" \"\\nAvailable", "Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a principal elastic tree. Reference: Albergante", "Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, key_added: Optional[str] = \"rd_spatial\",", "kwargs: Other parameters used in scipy.spatial.KDTree. Returns: A model, which", "model[spatial_key] DDRTree_kwargs = { \"maxIter\": 10, \"sigma\": 0.001, \"gamma\": 10,", "import Optional, Tuple, Union import numpy as np import pandas", "NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes = elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights =", "the principal tree. spatial_key: The key that corresponds to the", "tree_data.fillna(value=0, inplace=True) for sub_key in key: tree.point_data[sub_key] = tree_data[sub_key].values return", "def SimplePPT_tree( X: np.ndarray, NumNodes: int = 50, **kwargs, )", "igraph import simpleppt except ImportError: raise ImportError( \"You need to", "pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes), 1) return tree_model def", "changes_along_line( model: Union[PolyData, UnstructuredGrid], key: Union[str, list] = None, n_points:", "simple principal tree. Reference: Mao et al. (2015), SimplePPT: A", "############################## # Changes along the branches # ############################## def ElPiGraph_tree(", "Args: nodes: The nodes in the principal tree. edges: The", "Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, map_key: Union[str, list] =", "raise ImportError( \"You need to install the package `simpleppt` and", "edges = np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist() ) return nodes,", "PolyData: \"\"\" Construct a principal tree model. Args: nodes: The", "see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes in the principal tree.", "DataSet, MultiBlock, PolyData, UnstructuredGrid try: from typing import Literal except", "the gene expression array. \"\"\" model = model.copy() model_data =", "zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value) if point_i == 0:", "KDTree(np.asarray(nodes), **kwargs) _, ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii", "B = ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist()", "== 0: x.append(0) else: point1 = line_points[point_i - 1].points.flatten() point2", "= simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R nodes = (np.dot(X.T,", "principal tree model, which contains the following properties: `tree_model.point_data[key_added]`, the", "algorithm, SIAM International Conference on Data Mining. Args: X: DxN,", "edges: np.ndarray, key_added: Optional[str] = \"nodes\", ) -> PolyData: \"\"\"", "as pv from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid try:", "the coordinates of the point in the model. If spatial_key", "that corresponds to the coordinates of the nodes in the", "used in elpigraph.computeElasticPrincipalTree. 
For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes:", "spatial_key is None else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _,", "np.ndarray, MultiBlock, MultiBlock]: slices, line_points, line = three_d_slice( model=model, method=\"line\",", "np.ndarray, spatial_key: Optional[str] = None, key_added: Optional[str] = \"nodes\", inplace:", "if not inplace else None def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid],", "5 * X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y, stree,", "number of nodes of the principal graph. Use a range", "\"eps\": 0, \"dim\": dim, \"Lambda\": 5 * X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]),", "Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\", NumNodes: int = 50, inplace: bool", "the coordinates are model.points. key_added: The key under which to", "tree, which contains the following properties: `tree.point_data[key]`, the gene expression", "wrong.\" \"\\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch( model=model, nodes=nodes,", "nodes: The nodes in the principal tree. spatial_key: The key", "Union[PolyData, UnstructuredGrid], tree: PolyData, key: Union[str, list], nodes_key: Optional[str] =", "is None else model[spatial_key] if rd_method == \"ElPiGraph\": nodes, edges", "node to any point in the model through KDTree. Args:", "model = model.copy() model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key = [key]", "DDRTree(X, **DDRTree_kwargs) # Obtain the real part of the complex", "of 10 to 100 for ElPiGraph approach. **kwargs: Other parameters", "DDRTree, cal_ncenter from .slice import euclidean_distance, three_d_slice #################################### # Changes", "`tree_model.point_data[key_added]`, the nodes labels array. \"\"\" padding = np.empty(edges.shape[0], int)", "in elpigraph.computeElasticPrincipalTree. For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The", "to install the package `cupy`.\" \"\\nInstall cupy via `pip install", "three-dims principal tree model, which contains the following properties: `tree_model.point_data[key_added]`,", "the following properties: `tree_model.point_data[key_added]`, the nodes labels array. \"\"\" padding", "gene expression. nodes_key: The key that corresponds to the coordinates", "import cupy except ImportError: raise ImportError( \"You need to install", "PolyData, UnstructuredGrid], PolyData]: model = model.copy() if not inplace else", "0.001, \"gamma\": 10, \"eps\": 0, \"dim\": dim, \"Lambda\": 5 *", "{ \"alpha\": 0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True, \"GPU\": False, }", "If spatial_key is None, the coordinates are model.points. key_added: The", "approach. **kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please", "\"seed\": 1, \"lam\": 10, } SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree", "install the package `elpigraph-python`.\" \"\\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\"", "Find the closest principal tree node to any point in", "None, the coordinates are model.points. 
key_added: The key under which", "\"You need to install the package `simpleppt` and `igraph`.\" \"\\nInstall", "= False, ): \"\"\" Find the closest principal tree node", "10, \"sigma\": 0.001, \"gamma\": 10, \"eps\": 0, \"dim\": dim, \"Lambda\":", "ii return model if not inplace else None def map_gene_to_branch(", "x, y = [], [] x_length = 0 for slice,", "model contains the gene expression label. tree: A three-dims principal", "data matrix list. NumNodes: The number of nodes of the", "to the coordinates of the nodes in the tree. inplace:", "if not inplace else tree tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data", "padding = np.empty(edges.shape[0], int) * 2 padding[:] = 2 edges_w_padding", "################################# # Changes along the model shape # ################################# def", "-> PolyData: \"\"\" Construct a principal tree model. Args: nodes:", "rd_method == \"SimplePPT\": nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else:", "nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii return model if not inplace", "contains the gene expression label. tree: A three-dims principal tree", "(2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry", "which to add the nodes labels. Returns: A three-dims principal", "def changes_along_line( model: Union[PolyData, UnstructuredGrid], key: Union[str, list] = None,", "nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method == \"SimplePPT\":", "10, } SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes,", "tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes), 1) return", "= line_points[point_i - 1].points.flatten() point2 = line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1,", "None): map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True ) return model", "isinstance(key, str) else key for sub_key in key: model_data[sub_key] =", "0, 0), center: Union[tuple, list] = None, ) -> Tuple[np.ndarray,", "Union[tuple, list] = None, ) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]:", "of Complex Intrinsic Dataset Geometry via ElPiGraph. Args: X: DxN,", "details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes in the", "any point in the model through KDTree. Args: model: A", "`tree.point_data[key]`, the gene expression array. \"\"\" model = model.copy() model_data", "model.points if spatial_key is None else model[spatial_key] if rd_method ==", "Learning of Complex Intrinsic Dataset Geometry via ElPiGraph. Args: X:", "x.append(x_length) return np.asarray(x), np.asarray(y), slices, line ################################# # Changes along", "https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes in the principal tree. edges:", "cupy except ImportError: raise ImportError( \"You need to install the", "International Conference on Data Mining. Args: X: DxN, data matrix", "in the model through KDTree. Args: model: A reconstruct model.", "the model shape # ################################# def changes_along_shape( model: Union[PolyData, UnstructuredGrid],", "the tree. inplace: Updates tree model in-place. 
Returns: A tree,", "to install the package `simpleppt` and `igraph`.\" \"\\nInstall simpleppt via", "SimplePPT_kwargs = { \"seed\": 1, \"lam\": 10, } SimplePPT_kwargs.update(kwargs) X", "nodes of the principal graph. Use a range of 100", "inplace: bool = False, **kwargs, ) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid],", "construct_tree_model(nodes=nodes, edges=edges) if not (map_key is None): map_gene_to_branch( model=model, tree=tree_model,", "scipy.spatial import KDTree model = model.copy() if not inplace else", "For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes in", "# ################################# def changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] =", "= \"nodes\", ) -> PolyData: \"\"\" Construct a principal tree", "Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph. Args:", "model.points. key_added: The key under which to add the nodes", "import KDTree model = model.copy() if not inplace else model", "git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs = { \"alpha\": 0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\":", "50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a simple", "Generate a principal elastic tree. Reference: Albergante et al. (2020),", "SimplePPT_tree( X: np.ndarray, NumNodes: int = 50, **kwargs, ) ->", "tree model contains the nodes label. key: The key that", "np.ndarray]: \"\"\" Generate a simple principal tree. Reference: Mao et", "tree=tree_model, key=map_key, nodes_key=key_added, inplace=True ) return model if not inplace", "rd_method == \"ElPiGraph\": nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif", "= np.empty(edges.shape[0], int) * 2 padding[:] = 2 edges_w_padding =", "= elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges =", "for PPT approach. **kwargs: Other parameters used in simpleppt.ppt. For", "edges: The edges between nodes in the principal tree. key_added:", "x_length = 0 for slice, (point_i, point) in zip(slices, enumerate(line_points)):", "in the principal tree. edges: The edges between nodes in", "= ii return model if not inplace else None def", "tree if not inplace else None def construct_tree_model( nodes: np.ndarray,", "\"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\", NumNodes: int = 50,", "key: Union[str, list] = None, n_points: int = 100, vec:", "ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method == \"SimplePPT\": nodes, edges =", "= np.asarray(slice[key]).sum() y.append(change_value) if point_i == 0: x.append(0) else: point1", "0), center: Union[tuple, list] = None, ) -> Tuple[np.ndarray, np.ndarray,", "from scipy.spatial import KDTree model = model.copy() if not inplace", "elif rd_method == \"SimplePPT\": nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs)", "= model.points if spatial_key is None else model[spatial_key] DDRTree_kwargs =", "following properties: `tree.point_data[key]`, the gene expression array. \"\"\" model =", "The key that corresponds to the gene expression. nodes_key: The", "-> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a simple principal tree. Reference:", "is None, the coordinates are model.points. 
key_added: The key under", "shape # ################################# def changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str]", "list] = None, ) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices,", "if rd_method == \"ElPiGraph\": nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs)", "`pip install cupy-cuda113`.\" ) elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs", "UnstructuredGrid], tree: PolyData, key: Union[str, list], nodes_key: Optional[str] = \"nodes\",", "key that corresponds to the gene expression. nodes_key: The key", "\"dim\": dim, \"Lambda\": 5 * X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs)", "properties: `model.point_data[key_added]`, the nodes labels array. \"\"\" from scipy.spatial import", "principal tree. key_added: The key under which to add the", "`'SimplePPT'`.\" ) map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model", "None, key_added: Optional[str] = \"rd_spatial\", dim: int = 2, inplace:", "\"You need to install the package `elpigraph-python`.\" \"\\nInstall elpigraph-python via", "is True: try: import cupy except ImportError: raise ImportError( \"You", "else: point1 = line_points[point_i - 1].points.flatten() point2 = line_points[point_i].points.flatten() ed", "np.arange(0, len(nodes), 1) return tree_model def changes_along_branch( model: Union[PolyData, UnstructuredGrid],", "model through KDTree. Args: model: A reconstruct model contains the", ") return nodes, edges def map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes:", "numpy as np import pandas as pd import pyvista as", "#################################### # Changes along a vector direction # #################################### def", "X = np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R =", "corresponds to the coordinates of the point in the model.", "the nodes labels. Returns: A three-dims principal tree model, which", "inplace: Updates tree model in-place. Returns: A tree, which contains", "value is wrong.\" \"\\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch(", "tree_data[sub_key].values return tree if not inplace else None def construct_tree_model(", "expression. nodes_key: The key that corresponds to the coordinates of", "tree model, which contains the following properties: `tree_model.point_data[key_added]`, the nodes", "contains the following properties: `tree_model.point_data[key_added]`, the nodes labels array. \"\"\"", "import euclidean_distance, three_d_slice #################################### # Changes along a vector direction", "tree: A three-dims principal tree model contains the nodes label.", "The nodes in the principal tree. spatial_key: The key that", "Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a simple principal tree. Reference: Mao", "try: from typing import Literal except ImportError: from typing_extensions import", "model: A reconstruct model contains the gene expression label. 
tree:", "key_added: Optional[str] = \"nodes\", inplace: bool = False, **kwargs, ):", "pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key = [key] if isinstance(key, str) else key", "else None def construct_tree_model( nodes: np.ndarray, edges: np.ndarray, key_added: Optional[str]", "cupy via `pip install cupy-cuda113`.\" ) elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X),", "model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for sub_key in key: tree.point_data[sub_key]", "= line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length += ed", "principal elastic tree. Reference: Albergante et al. (2020), Robust and", "key_added=key_added, inplace=True, ) tree_model = construct_tree_model(nodes=nodes, edges=edges) if not (map_key", "= nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii return model if not", "None, key_added: Optional[str] = \"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\",", "UnstructuredGrid try: from typing import Literal except ImportError: from typing_extensions", "\"lam\": 10, } SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree = simpleppt.ppt(X=X,", "`model.point_data[key_added]`, the nodes labels array. \"\"\" from scipy.spatial import KDTree", "line ################################# # Changes along the model shape # #################################", "key: tree.point_data[sub_key] = tree_data[sub_key].values return tree if not inplace else", "\"\"\" from scipy.spatial import KDTree model = model.copy() if not", "x.append(0) else: point1 = line_points[point_i - 1].points.flatten() point2 = line_points[point_i].points.flatten()", "following properties: `tree_model.point_data[key_added]`, the nodes labels array. \"\"\" padding =", "= \"nodes\", inplace: bool = False, ): \"\"\" Find the", "the closest principal tree node to any point in the", "inplace else None def construct_tree_model( nodes: np.ndarray, edges: np.ndarray, key_added:", "None def construct_tree_model( nodes: np.ndarray, edges: np.ndarray, key_added: Optional[str] =", "\"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True, \"GPU\": False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"]", "nodes label. key: The key that corresponds to the gene", "list] = None, key_added: Optional[str] = \"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"]", "model: Union[PolyData, UnstructuredGrid], tree: PolyData, key: Union[str, list], nodes_key: Optional[str]", "= KDTree(np.asarray(nodes), **kwargs) _, ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] =", "complex argument model[key_added] = np.real(W).astype(np.float64) return model if not inplace", "package `simpleppt` and `igraph`.\" \"\\nInstall simpleppt via `pip install -U", "np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges def", "the following properties: `model.point_data[key_added]`, the nodes labels array. \"\"\" from", "edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes), 1)", "spatial_key: The key that corresponds to the coordinates of the", "**kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. 
For details, please see:", "\"\\nInstall cupy via `pip install cupy-cuda113`.\" ) elpi_tree = elpigraph.computeElasticPrincipalTree(", "np.ndarray, NumNodes: int = 50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]:", "Dataset Geometry via ElPiGraph. Args: X: DxN, data matrix list.", "graph. Use a range of 10 to 100 for ElPiGraph", "model.copy() if not inplace else model X = model.points if", "**kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a simple principal", "\"Penalized\", \"StoreGraphEvolution\": True, \"GPU\": False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is", "details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes in the", "in the tree. inplace: Updates tree model in-place. Returns: A", "simpleppt`.\" \"\\nInstall igraph via `pip install -U igraph`\" ) SimplePPT_kwargs", "tree: PolyData, key: Union[str, list], nodes_key: Optional[str] = \"nodes\", inplace:", "True: try: import cupy except ImportError: raise ImportError( \"You need", "vec=vec, center=center ) x, y = [], [] x_length =", ") ElPiGraph_kwargs = { \"alpha\": 0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True,", "Optional[str] = \"nodes\", inplace: bool = False, ): \"\"\" Find", "tree. Reference: Albergante et al. (2020), Robust and Scalable Learning", "not inplace else model X = model.points if spatial_key is", "= \"nodes\", inplace: bool = False, **kwargs, ): \"\"\" Find", "principal graph. Use a range of 10 to 100 for", "= { \"alpha\": 0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True, \"GPU\": False,", "to install the package `elpigraph-python`.\" \"\\nInstall elpigraph-python via `pip install", "False, **kwargs, ): \"\"\" Find the closest principal tree node", "0 for slice, (point_i, point) in zip(slices, enumerate(line_points)): change_value =", "Use a range of 10 to 100 for ElPiGraph approach.", "via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs = { \"alpha\": 0.01,", "which to add the nodes labels. inplace: Updates model in-place.", "in the model through KDTree. Args: model: A reconstruct model", "Args: model: A reconstruct model. nodes: The nodes in the", "\"sigma\": 0.001, \"gamma\": 10, \"eps\": 0, \"dim\": dim, \"Lambda\": 5", "nodes labels array. \"\"\" padding = np.empty(edges.shape[0], int) * 2", "the nodes labels. inplace: Updates model in-place. kwargs: Other parameters", "the model through KDTree. Args: model: A reconstruct model. nodes:", "the nodes labels array. 
\"\"\" padding = np.empty(edges.shape[0], int) *", "1) return tree_model def changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str]", "return np.asarray(x), np.asarray(y), slices, line ################################# # Changes along the", "Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R nodes = (np.dot(X.T, R) /", "= None, ) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices, line_points,", "model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index model_data.index = range(len(model_data.index)) tree = tree.copy()", "how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for sub_key in key: tree.point_data[sub_key] =", "* 2 padding[:] = 2 edges_w_padding = np.vstack((padding, edges.T)).T tree_model", "key_added: The key under which to add the nodes labels.", "10, \"eps\": 0, \"dim\": dim, \"Lambda\": 5 * X.shape[1], \"ncenter\":", "ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is True: try: import cupy except ImportError:", "map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model = construct_tree_model(nodes=nodes,", "np.real(W).astype(np.float64) return model if not inplace else None ############################## #", "if point_i == 0: x.append(0) else: point1 = line_points[point_i -", "Optional[str] = \"nodes\", inplace: bool = False, **kwargs, ): \"\"\"", "contains the nodes label. key: The key that corresponds to", "spatial_key is None else model[spatial_key] DDRTree_kwargs = { \"maxIter\": 10,", "gene expression label. tree: A three-dims principal tree model contains", "= np.arange(0, len(nodes), 1) return tree_model def changes_along_branch( model: Union[PolyData,", "The key under which to add the nodes labels. Returns:", "} ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is True: try: import cupy except", "**kwargs: Other parameters used in simpleppt.ppt. For details, please see:", "import elpigraph except ImportError: raise ImportError( \"You need to install", "bool = False, **kwargs, ): model = model.copy() if not", "map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree: PolyData, key: Union[str, list], nodes_key:", "matrix list. NumNodes: The number of nodes of the principal", "if spatial_key is None else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs)", "nodes in the principal tree. spatial_key: The key that corresponds", "map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True ) return model if", "to 2000 for PPT approach. **kwargs: Other parameters used in", "SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise ValueError( \"`rd_method` value is wrong.\"", "of the point in the model. If spatial_key is None,", "A three-dims principal tree model contains the nodes label. key:", "nodes, edges def map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key:", "of nodes of the principal graph. Use a range of", "y = [], [] x_length = 0 for slice, (point_i,", "tree_model def changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None,", "> 0).tolist(), mode=\"undirected\").get_edgelist() ) return nodes, edges def map_points_to_branch( model:", "the nodes in the tree. inplace: Updates tree model in-place.", "between nodes in the principal tree. 
key_added: The key under", "= np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0,", "which contains the following properties: `tree_model.point_data[key_added]`, the nodes labels array.", "model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key = [key] if isinstance(key, str)", "= pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for sub_key in", "return nodes, edges def map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray,", "Tuple, Union import numpy as np import pandas as pd", "of the complex argument model[key_added] = np.real(W).astype(np.float64) return model if", "and `igraph`.\" \"\\nInstall simpleppt via `pip install -U simpleppt`.\" \"\\nInstall", ") nodes = elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] #", "enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value) if point_i == 0: x.append(0)", "nodes = (np.dot(X.T, R) / R.sum(axis=0)).T B = ppt_tree.B edges", "list] = (1, 0, 0), center: Union[tuple, list] = None,", "change_value = np.asarray(slice[key]).sum() y.append(change_value) if point_i == 0: x.append(0) else:", "principal tree. spatial_key: The key that corresponds to the coordinates", "model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, map_key: Union[str, list]", "inplace: bool = False, **kwargs, ): \"\"\" Find the closest", "not inplace else None def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree:", "dtype=int).transpose() return nodes, edges def SimplePPT_tree( X: np.ndarray, NumNodes: int", "key that corresponds to the coordinates of the nodes in", "from typing_extensions import Literal from .ddrtree import DDRTree, cal_ncenter from", "as np import pandas as pd import pyvista as pv", "\"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y, stree, R, W, Q,", "key_added: Optional[str] = \"nodes\", ) -> PolyData: \"\"\" Construct a", "objs = DDRTree(X, **DDRTree_kwargs) # Obtain the real part of", "edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method == \"SimplePPT\": nodes,", "def map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str] =", "nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _, ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added]", "model if not inplace else None ############################## # Changes along", "the principal graph. Use a range of 100 to 2000", "from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid try: from typing", "= None, n_points: int = 100, vec: Union[tuple, list] =", "\"nodes\", inplace: bool = False, **kwargs, ): \"\"\" Find the", "model_data[\"nodes_id\"] = model_data.index model_data.index = range(len(model_data.index)) tree = tree.copy() if", "nodes in the tree. inplace: Updates tree model in-place. 
Returns:", "model[key_added] = np.real(W).astype(np.float64) return model if not inplace else None", "False, **kwargs, ) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model =", "matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges", "dimension=3) x_length += ed x.append(x_length) return np.asarray(x), np.asarray(y), slices, line", "import pandas as pd import pyvista as pv from pyvista", "add the nodes labels. inplace: Updates model in-place. kwargs: Other", "Use a range of 100 to 2000 for PPT approach.", "def changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, key_added:", "pd import pyvista as pv from pyvista import DataSet, MultiBlock,", "which contains the following properties: `tree.point_data[key]`, the gene expression array.", "import pyvista as pv from pyvista import DataSet, MultiBlock, PolyData,", "via ElPiGraph. Args: X: DxN, data matrix list. NumNodes: The", "properties: `tree.point_data[key]`, the gene expression array. \"\"\" model = model.copy()", "under which to add the nodes labels. inplace: Updates model", "1, \"lam\": 10, } SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree =", "x_length += ed x.append(x_length) return np.asarray(x), np.asarray(y), slices, line #################################", "\"\\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs = {", "inplace: Updates model in-place. kwargs: Other parameters used in scipy.spatial.KDTree.", "Optional[str] = \"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\", NumNodes: int", "The edges between nodes in the principal tree. key_added: The", "= np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges", "key that corresponds to the coordinates of the point in", "MultiBlock, MultiBlock]: slices, line_points, line = three_d_slice( model=model, method=\"line\", n_slices=n_points,", "= SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise ValueError( \"`rd_method` value is", "ImportError: raise ImportError( \"You need to install the package `elpigraph-python`.\"", "try: import igraph import simpleppt except ImportError: raise ImportError( \"You", "https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes in the principal tree. edges:", "UnstructuredGrid], spatial_key: Optional[str] = None, key_added: Optional[str] = \"rd_spatial\", dim:", "see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes in the principal tree.", "-> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model = model.copy() if not", "else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _, ii = nodes_kdtree.query(np.asarray(X),", "else model[spatial_key] if rd_method == \"ElPiGraph\": nodes, edges = ElPiGraph_tree(X=X,", "along the branches # ############################## def ElPiGraph_tree( X: np.ndarray, NumNodes:", "model = model.copy() if not inplace else model X =", "The number of nodes of the principal graph. Use a", "which contains the following properties: `model.point_data[key_added]`, the nodes labels array.", "the gene expression. 
nodes_key: The key that corresponds to the", "euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length += ed x.append(x_length) return np.asarray(x), np.asarray(y),", "= 50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a", "between nodes in the principal tree. \"\"\" try: import elpigraph", "in the model. If spatial_key is None, the coordinates are", "to add the nodes labels. inplace: Updates model in-place. kwargs:", "nodes = elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k]", "A reconstruct model contains the gene expression label. tree: A", "tree_data = pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for sub_key", ") tree_model = construct_tree_model(nodes=nodes, edges=edges) if not (map_key is None):", "= { \"maxIter\": 10, \"sigma\": 0.001, \"gamma\": 10, \"eps\": 0,", "2, inplace: bool = False, **kwargs, ): model = model.copy()", "to add the nodes labels. Returns: A three-dims principal tree", "['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return", "################################# def changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None,", "int) * 2 padding[:] = 2 edges_w_padding = np.vstack((padding, edges.T)).T", "for sub_key in key: tree.point_data[sub_key] = tree_data[sub_key].values return tree if", "simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R nodes = (np.dot(X.T, R)", "labels array. \"\"\" padding = np.empty(edges.shape[0], int) * 2 padding[:]", "For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes in", "is None else model[spatial_key] DDRTree_kwargs = { \"maxIter\": 10, \"sigma\":", "to any point in the model through KDTree. Args: model:", "install the package `simpleppt` and `igraph`.\" \"\\nInstall simpleppt via `pip", "= three_d_slice( model=model, method=\"line\", n_slices=n_points, vec=vec, center=center ) x, y", ") -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]: slices, line_points, line =", "\"`rd_method` value is wrong.\" \"\\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\" )", "nodes labels. inplace: Updates model in-place. kwargs: Other parameters used", "along the model shape # ################################# def changes_along_shape( model: Union[PolyData,", "= (np.dot(X.T, R) / R.sum(axis=0)).T B = ppt_tree.B edges =", "KDTree. Args: model: A reconstruct model. nodes: The nodes in", "\"SimplePPT\": nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise ValueError(", "list], nodes_key: Optional[str] = \"nodes\", inplace: bool = False, ):", "= pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0,", "\"maxIter\": 10, \"sigma\": 0.001, \"gamma\": 10, \"eps\": 0, \"dim\": dim,", "None else model[spatial_key] DDRTree_kwargs = { \"maxIter\": 10, \"sigma\": 0.001,", "edges: The edges between nodes in the principal tree. \"\"\"", "except ImportError: from typing_extensions import Literal from .ddrtree import DDRTree,", "SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R", "expression label. 
tree: A three-dims principal tree model contains the", "PolyData, UnstructuredGrid try: from typing import Literal except ImportError: from", "Changes along the model shape # ################################# def changes_along_shape( model:", "the point in the model. If spatial_key is None, the", "if spatial_key is None else model[spatial_key] DDRTree_kwargs = { \"maxIter\":", "import Literal from .ddrtree import DDRTree, cal_ncenter from .slice import", "in simpleppt.ppt. For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The", "-U simpleppt`.\" \"\\nInstall igraph via `pip install -U igraph`\" )", "try: import cupy except ImportError: raise ImportError( \"You need to", "import numpy as np import pandas as pd import pyvista", "model, which contains the following properties: `model.point_data[key_added]`, the nodes labels", "model shape # ################################# def changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key:", "typing import Literal except ImportError: from typing_extensions import Literal from", "Intrinsic Dataset Geometry via ElPiGraph. Args: X: DxN, data matrix", "`pip install -U igraph`\" ) SimplePPT_kwargs = { \"seed\": 1,", "\"gamma\": 10, \"eps\": 0, \"dim\": dim, \"Lambda\": 5 * X.shape[1],", "model.points if spatial_key is None else model[spatial_key] DDRTree_kwargs = {", ") SimplePPT_kwargs = { \"seed\": 1, \"lam\": 10, } SimplePPT_kwargs.update(kwargs)", "= { \"seed\": 1, \"lam\": 10, } SimplePPT_kwargs.update(kwargs) X =", "principal tree model. Args: nodes: The nodes in the principal", "UnstructuredGrid], PolyData]: model = model.copy() if not inplace else model", "R.sum(axis=0)).T B = ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B > 0).tolist(),", "NumNodes: The number of nodes of the principal graph. Use", "the complex argument model[key_added] = np.real(W).astype(np.float64) return model if not", "edges between nodes in the principal tree. key_added: The key", "\"SimplePPT\"] = \"ElPiGraph\", NumNodes: int = 50, inplace: bool =", "nodes=nodes, spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model = construct_tree_model(nodes=nodes, edges=edges) if", "= elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes = elpi_tree[0][\"NodePositions\"] #", "inplace=True ) return model if not inplace else None, tree_model", "np.ndarray, edges: np.ndarray, key_added: Optional[str] = \"nodes\", ) -> PolyData:", "= 0 for slice, (point_i, point) in zip(slices, enumerate(line_points)): change_value", "= 2, inplace: bool = False, **kwargs, ): model =", "None, n_points: int = 100, vec: Union[tuple, list] = (1,", "= [], [] x_length = 0 for slice, (point_i, point)", "is wrong.\" \"\\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch( model=model,", "ppt_tree.R nodes = (np.dot(X.T, R) / R.sum(axis=0)).T B = ppt_tree.B", "np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes),", "key_added: Optional[str] = \"rd_spatial\", dim: int = 2, inplace: bool", "mode=\"undirected\").get_edgelist() ) return nodes, edges def map_points_to_branch( model: Union[PolyData, UnstructuredGrid],", "10 to 100 for ElPiGraph approach. 
**kwargs: Other parameters used", "not inplace else tree tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data =", "slices, line ################################# # Changes along the model shape #", "edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges def SimplePPT_tree( X:", "key: model_data[sub_key] = np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index", "if not inplace else None def construct_tree_model( nodes: np.ndarray, edges:", "y.append(change_value) if point_i == 0: x.append(0) else: point1 = line_points[point_i", "None ############################## # Changes along the branches # ############################## def", "2 edges_w_padding = np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added]", "Optional[str] = \"rd_spatial\", dim: int = 2, inplace: bool =", "= tree.copy() if not inplace else tree tree_data = pd.DataFrame(tree[nodes_key],", "R = ppt_tree.R nodes = (np.dot(X.T, R) / R.sum(axis=0)).T B", "= \"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\", NumNodes: int =", "tree algorithm, SIAM International Conference on Data Mining. Args: X:", "**kwargs, ) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model = model.copy()", "MultiBlock, PolyData, UnstructuredGrid try: from typing import Literal except ImportError:", "part of the complex argument model[key_added] = np.real(W).astype(np.float64) return model", "= euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length += ed x.append(x_length) return np.asarray(x),", "the principal tree. \"\"\" try: import elpigraph except ImportError: raise", "model=model, method=\"line\", n_slices=n_points, vec=vec, center=center ) x, y = [],", "model in-place. kwargs: Other parameters used in scipy.spatial.KDTree. Returns: A", "ElPiGraph. Args: X: DxN, data matrix list. NumNodes: The number", "ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R nodes =", "PolyData, key: Union[str, list], nodes_key: Optional[str] = \"nodes\", inplace: bool", "None, key_added: Optional[str] = \"nodes\", inplace: bool = False, **kwargs,", "model, which contains the following properties: `tree_model.point_data[key_added]`, the nodes labels", "in zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value) if point_i ==", "**kwargs) else: raise ValueError( \"`rd_method` value is wrong.\" \"\\nAvailable `rd_method`", "ImportError( \"You need to install the package `cupy`.\" \"\\nInstall cupy", "pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for sub_key in key:", "KDTree. Args: model: A reconstruct model contains the gene expression", "raise ImportError( \"You need to install the package `cupy`.\" \"\\nInstall", "point1 = line_points[point_i - 1].points.flatten() point2 = line_points[point_i].points.flatten() ed =", "to 100 for ElPiGraph approach. **kwargs: Other parameters used in", "install cupy-cuda113`.\" ) elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs )", "principal graph. 
Use a range of 100 to 2000 for", "# Obtain the real part of the complex argument model[key_added]", "nodes_key=key_added, inplace=True ) return model if not inplace else None,", "simple principal tree algorithm, SIAM International Conference on Data Mining.", "elpigraph.computeElasticPrincipalTree. For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes", "in scipy.spatial.KDTree. Returns: A model, which contains the following properties:", "Y, stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs)", "between nodes in the principal tree. \"\"\" try: import igraph", "= pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes), 1) return tree_model", "in the principal tree. spatial_key: The key that corresponds to", "1].points.flatten() point2 = line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length", "graph. Use a range of 100 to 2000 for PPT", "(2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference", "map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str] = None,", "ElPiGraph approach. **kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details,", "############################## def ElPiGraph_tree( X: np.ndarray, NumNodes: int = 50, **kwargs,", "edges def map_points_to_branch( model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str]", "nodes labels array. \"\"\" from scipy.spatial import KDTree model =", "pyvista as pv from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid", "= False, **kwargs, ): model = model.copy() if not inplace", "in the principal tree. \"\"\" try: import elpigraph except ImportError:", "\"StoreGraphEvolution\": True, \"GPU\": False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is True:", "Union[str, list] = None, key_added: Optional[str] = \"nodes\", rd_method: Literal[\"ElPiGraph\",", "C, objs = DDRTree(X, **DDRTree_kwargs) # Obtain the real part", "euclidean_distance, three_d_slice #################################### # Changes along a vector direction #", "tree. edges: The edges between nodes in the principal tree.", "try: import elpigraph except ImportError: raise ImportError( \"You need to", "0, \"dim\": dim, \"Lambda\": 5 * X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), }", "np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index model_data.index = range(len(model_data.index))", "range of 10 to 100 for ElPiGraph approach. **kwargs: Other", "# ############################## def ElPiGraph_tree( X: np.ndarray, NumNodes: int = 50,", "int = 50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate", "dim, \"Lambda\": 5 * X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z,", "labels. inplace: Updates model in-place. kwargs: Other parameters used in", "# ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose()", "on Data Mining. Args: X: DxN, data matrix list. NumNodes:", "to the gene expression. 
nodes_key: The key that corresponds to", "matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes,", "**kwargs, ): model = model.copy() if not inplace else model", "padding[:] = 2 edges_w_padding = np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes,", "return model if not inplace else None ############################## # Changes", "not inplace else None def construct_tree_model( nodes: np.ndarray, edges: np.ndarray,", "ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length += ed x.append(x_length) return", "list] = None, n_points: int = 100, vec: Union[tuple, list]", "Other parameters used in simpleppt.ppt. For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py", "nodes_key: The key that corresponds to the coordinates of the", "A model, which contains the following properties: `model.point_data[key_added]`, the nodes", "slice, (point_i, point) in zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value)", "len(nodes), 1) return tree_model def changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key:", "reconstruct model. nodes: The nodes in the principal tree. spatial_key:", "install -U simpleppt`.\" \"\\nInstall igraph via `pip install -U igraph`\"", "instance2=point2, dimension=3) x_length += ed x.append(x_length) return np.asarray(x), np.asarray(y), slices,", "UnstructuredGrid], key: Union[str, list] = None, n_points: int = 100,", "A three-dims principal tree model, which contains the following properties:", "the package `elpigraph-python`.\" \"\\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\" )", "tree tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data, model_data, how=\"outer\",", "\"\"\" Generate a simple principal tree. Reference: Mao et al.", "nodes_key: Optional[str] = \"nodes\", inplace: bool = False, ): \"\"\"", "model_data[sub_key] = np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index model_data.index", "from .slice import euclidean_distance, three_d_slice #################################### # Changes along a", "inplace else None ############################## # Changes along the branches #", "= False, **kwargs, ): \"\"\" Find the closest principal tree", "rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\", NumNodes: int = 50, inplace:", "The key under which to add the nodes labels. inplace:", "\"\"\" Find the closest principal tree node to any point", ") -> PolyData: \"\"\" Construct a principal tree model. Args:", "model: A reconstruct model. nodes: The nodes in the principal", "line_points, line = three_d_slice( model=model, method=\"line\", n_slices=n_points, vec=vec, center=center )", "parameters used in elpigraph.computeElasticPrincipalTree. 
For details, please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns:", "= None, key_added: Optional[str] = \"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] =", "else model[spatial_key] DDRTree_kwargs = { \"maxIter\": 10, \"sigma\": 0.001, \"gamma\":", "changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, map_key: Union[str,", "stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs) #", "return nodes, edges def SimplePPT_tree( X: np.ndarray, NumNodes: int =", "None, map_key: Union[str, list] = None, key_added: Optional[str] = \"nodes\",", "= np.real(W).astype(np.float64) return model if not inplace else None ##############################", "Data Mining. Args: X: DxN, data matrix list. NumNodes: The", "principal tree. \"\"\" try: import igraph import simpleppt except ImportError:", "key: The key that corresponds to the gene expression. nodes_key:", "# Changes along a vector direction # #################################### def changes_along_line(", "Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via", "reconstruct model contains the gene expression label. tree: A three-dims", "nodes: The nodes in the principal tree. edges: The edges", "bool = False, ): \"\"\" Find the closest principal tree", "np.ndarray, key_added: Optional[str] = \"nodes\", ) -> PolyData: \"\"\" Construct", "return tree if not inplace else None def construct_tree_model( nodes:", "in the principal tree. key_added: The key under which to", "else: raise ValueError( \"`rd_method` value is wrong.\" \"\\nAvailable `rd_method` are:", "ImportError: raise ImportError( \"You need to install the package `simpleppt`", "import DataSet, MultiBlock, PolyData, UnstructuredGrid try: from typing import Literal", "tree. \"\"\" try: import igraph import simpleppt except ImportError: raise", "in-place. kwargs: Other parameters used in scipy.spatial.KDTree. Returns: A model,", "W, Q, C, objs = DDRTree(X, **DDRTree_kwargs) # Obtain the", "spatial_key is None else model[spatial_key] if rd_method == \"ElPiGraph\": nodes,", "0: x.append(0) else: point1 = line_points[point_i - 1].points.flatten() point2 =", "Q, C, objs = DDRTree(X, **DDRTree_kwargs) # Obtain the real", "point in the model. If spatial_key is None, the coordinates", "Returns: nodes: The nodes in the principal tree. edges: The", "Mao et al. (2015), SimplePPT: A simple principal tree algorithm,", "direction # #################################### def changes_along_line( model: Union[PolyData, UnstructuredGrid], key: Union[str,", "raise ImportError( \"You need to install the package `elpigraph-python`.\" \"\\nInstall", "tree model. Args: nodes: The nodes in the principal tree.", "tree_model = construct_tree_model(nodes=nodes, edges=edges) if not (map_key is None): map_gene_to_branch(", "et al. (2020), Robust and Scalable Learning of Complex Intrinsic", "bool = False, **kwargs, ) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]:", "for sub_key in key: model_data[sub_key] = np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum()", "Reference: Mao et al. 
(2015), SimplePPT: A simple principal tree", "columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True) for", "key=map_key, nodes_key=key_added, inplace=True ) return model if not inplace else", "typing_extensions import Literal from .ddrtree import DDRTree, cal_ncenter from .slice", "UnstructuredGrid], spatial_key: Optional[str] = None, map_key: Union[str, list] = None,", "Union[PolyData, UnstructuredGrid], key: Union[str, list] = None, n_points: int =", "ImportError: from typing_extensions import Literal from .ddrtree import DDRTree, cal_ncenter", "The key that corresponds to the coordinates of the point", "import DDRTree, cal_ncenter from .slice import euclidean_distance, three_d_slice #################################### #", "import Literal except ImportError: from typing_extensions import Literal from .ddrtree", "are model.points. key_added: The key under which to add the", "if spatial_key is None else model[spatial_key] if rd_method == \"ElPiGraph\":", "principal tree node to any point in the model through", "DxN, data matrix list. NumNodes: The number of nodes of", "edges_w_padding) tree_model.point_data[key_added] = np.arange(0, len(nodes), 1) return tree_model def changes_along_branch(", "= \"ElPiGraph\", NumNodes: int = 50, inplace: bool = False,", "is None): map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True ) return", "a principal tree model. Args: nodes: The nodes in the", "is None else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _, ii", "= model.copy() model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key = [key] if", "== \"SimplePPT\": nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise", "cupy-cuda113`.\" ) elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes", "simpleppt.ppt. For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes: The nodes", "NumNodes: int = 50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\"", "nodes, edges def SimplePPT_tree( X: np.ndarray, NumNodes: int = 50,", "the real part of the complex argument model[key_added] = np.real(W).astype(np.float64)", "ElPiGraph_kwargs[\"GPU\"] is True: try: import cupy except ImportError: raise ImportError(", "the nodes labels array. \"\"\" from scipy.spatial import KDTree model", "Reference: Albergante et al. (2020), Robust and Scalable Learning of", "= 100, vec: Union[tuple, list] = (1, 0, 0), center:", "center=center ) x, y = [], [] x_length = 0", "real part of the complex argument model[key_added] = np.real(W).astype(np.float64) return", "\"\"\" try: import igraph import simpleppt except ImportError: raise ImportError(", "Other parameters used in scipy.spatial.KDTree. Returns: A model, which contains", "50, **kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a principal", "\"Lambda\": 5 * X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y,", "str) else key for sub_key in key: model_data[sub_key] = np.asarray(model[sub_key])", "A tree, which contains the following properties: `tree.point_data[key]`, the gene", "raise ValueError( \"`rd_method` value is wrong.\" \"\\nAvailable `rd_method` are: `'ElPiGraph'`,", "line = three_d_slice( model=model, method=\"line\", n_slices=n_points, vec=vec, center=center ) x,", "tree. inplace: Updates tree model in-place. 
Returns: A tree, which", "\"\"\" padding = np.empty(edges.shape[0], int) * 2 padding[:] = 2", "**ElPiGraph_kwargs ) nodes = elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"]", "= model.copy() if not inplace else model X = model.points", "cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y, stree, R, W, Q, C,", "tree.point_data[sub_key] = tree_data[sub_key].values return tree if not inplace else None", "and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.", "et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM", "**kwargs, ): \"\"\" Find the closest principal tree node to", "model if not inplace else None def map_gene_to_branch( model: Union[PolyData,", "return tree_model def changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] =", "= DDRTree(X, **DDRTree_kwargs) # Obtain the real part of the", "of the principal graph. Use a range of 10 to", "simpleppt via `pip install -U simpleppt`.\" \"\\nInstall igraph via `pip", "ElPiGraph_kwargs = { \"alpha\": 0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True, \"GPU\":", "not inplace else None ############################## # Changes along the branches", "X: np.ndarray, NumNodes: int = 50, **kwargs, ) -> Tuple[np.ndarray,", "ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist() ) return", "the coordinates of the nodes in the tree. inplace: Updates", "coordinates of the point in the model. If spatial_key is", "that corresponds to the coordinates of the point in the", "\"alpha\": 0.01, \"FinalEnergy\": \"Penalized\", \"StoreGraphEvolution\": True, \"GPU\": False, } ElPiGraph_kwargs.update(kwargs)", "\"You need to install the package `cupy`.\" \"\\nInstall cupy via", "50, inplace: bool = False, **kwargs, ) -> Tuple[Union[DataSet, PolyData,", "+= ed x.append(x_length) return np.asarray(x), np.asarray(y), slices, line ################################# #", "principal tree. Reference: Mao et al. (2015), SimplePPT: A simple", "= (1, 0, 0), center: Union[tuple, list] = None, )", "Optional[str] = None, key_added: Optional[str] = \"nodes\", inplace: bool =", "`rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key, key_added=key_added,", "# Changes along the model shape # ################################# def changes_along_shape(", "list. NumNodes: The number of nodes of the principal graph.", "100 for ElPiGraph approach. **kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree.", "tree = tree.copy() if not inplace else tree tree_data =", "range(len(model_data.index)) tree = tree.copy() if not inplace else tree tree_data", "used in simpleppt.ppt. For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns: nodes:", "(np.dot(X.T, R) / R.sum(axis=0)).T B = ppt_tree.B edges = np.array(", "Union[str, list] = None, n_points: int = 100, vec: Union[tuple,", "point in the model through KDTree. Args: model: A reconstruct", "{ \"seed\": 1, \"lam\": 10, } SimplePPT_kwargs.update(kwargs) X = np.asarray(X)", "return model if not inplace else None def map_gene_to_branch( model:", "Conference on Data Mining. Args: X: DxN, data matrix list.", "inplace: bool = False, ): \"\"\" Find the closest principal", "of the nodes in the tree. 
inplace: Updates tree model", "model_data.index model_data.index = range(len(model_data.index)) tree = tree.copy() if not inplace", "**kwargs) elif rd_method == \"SimplePPT\": nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes,", "spatial_key is None, the coordinates are model.points. key_added: The key", "= None, key_added: Optional[str] = \"nodes\", inplace: bool = False,", "coordinates of the nodes in the tree. inplace: Updates tree", "\"\"\" try: import elpigraph except ImportError: raise ImportError( \"You need", "in key: tree.point_data[sub_key] = tree_data[sub_key].values return tree if not inplace", "principal tree. edges: The edges between nodes in the principal", "\"nodes\", ) -> PolyData: \"\"\" Construct a principal tree model.", "under which to add the nodes labels. Returns: A three-dims", "package `cupy`.\" \"\\nInstall cupy via `pip install cupy-cuda113`.\" ) elpi_tree", "from .ddrtree import DDRTree, cal_ncenter from .slice import euclidean_distance, three_d_slice", "argument model[key_added] = np.real(W).astype(np.float64) return model if not inplace else", "elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1) edges = np.array(np.nonzero(matrix_edges_weights),", "as pd import pyvista as pv from pyvista import DataSet,", "scipy.spatial.KDTree. Returns: A model, which contains the following properties: `model.point_data[key_added]`,", "ImportError( \"You need to install the package `elpigraph-python`.\" \"\\nInstall elpigraph-python", "through KDTree. Args: model: A reconstruct model. nodes: The nodes", "**kwargs) _, ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii return", "model.copy() model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key = [key] if isinstance(key,", "center: Union[tuple, list] = None, ) -> Tuple[np.ndarray, np.ndarray, MultiBlock,", "X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes = elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights", "Changes along the branches # ############################## def ElPiGraph_tree( X: np.ndarray,", "tree. \"\"\" try: import elpigraph except ImportError: raise ImportError( \"You", "package `elpigraph-python`.\" \"\\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs", "): \"\"\" Find the closest principal tree node to any", "slices, line_points, line = three_d_slice( model=model, method=\"line\", n_slices=n_points, vec=vec, center=center", "a simple principal tree. Reference: Mao et al. (2015), SimplePPT:", "tree. spatial_key: The key that corresponds to the coordinates of", "False, **kwargs, ): model = model.copy() if not inplace else", "array. \"\"\" from scipy.spatial import KDTree model = model.copy() if", "columns=[\"nodes_id\"]) key = [key] if isinstance(key, str) else key for", "= range(len(model_data.index)) tree = tree.copy() if not inplace else tree", "None else model[spatial_key] if rd_method == \"ElPiGraph\": nodes, edges =", "= ppt_tree.R nodes = (np.dot(X.T, R) / R.sum(axis=0)).T B =", "Updates model in-place. kwargs: Other parameters used in scipy.spatial.KDTree. Returns:", "\"ElPiGraph\", NumNodes: int = 50, inplace: bool = False, **kwargs,", "Args: X: DxN, data matrix list. NumNodes: The number of", "X: DxN, data matrix list. 
NumNodes: The number of nodes", "line_points[point_i - 1].points.flatten() point2 = line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2,", "gene expression array. \"\"\" model = model.copy() model_data = pd.DataFrame(model[nodes_key],", "ed x.append(x_length) return np.asarray(x), np.asarray(y), slices, line ################################# # Changes", "model_data.index = range(len(model_data.index)) tree = tree.copy() if not inplace else", "three_d_slice #################################### # Changes along a vector direction # ####################################", "/ R.sum(axis=0)).T B = ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B >", "PolyData]: model = model.copy() if not inplace else model X", "typing import Optional, Tuple, Union import numpy as np import", "): model = model.copy() if not inplace else model X", "model X = model.points if spatial_key is None else model[spatial_key]", "Geometry via ElPiGraph. Args: X: DxN, data matrix list. NumNodes:", "parameters used in simpleppt.ppt. For details, please see: https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py Returns:", "corresponds to the coordinates of the nodes in the tree.", "(point_i, point) in zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value) if", "model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] = model_data.index model_data.index = range(len(model_data.index)) tree", "tree. key_added: The key under which to add the nodes", "\"\"\" Generate a principal elastic tree. Reference: Albergante et al.", "from typing import Optional, Tuple, Union import numpy as np", "\"\\nInstall igraph via `pip install -U igraph`\" ) SimplePPT_kwargs =", "used in scipy.spatial.KDTree. Returns: A model, which contains the following", "Returns: A model, which contains the following properties: `model.point_data[key_added]`, the", "ElPiGraph_tree( X: np.ndarray, NumNodes: int = 50, **kwargs, ) ->", "a range of 100 to 2000 for PPT approach. **kwargs:", "model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True ) return model if not", "= 50, inplace: bool = False, **kwargs, ) -> Tuple[Union[DataSet,", "} DDRTree_kwargs.update(kwargs) Z, Y, stree, R, W, Q, C, objs", "`igraph`.\" \"\\nInstall simpleppt via `pip install -U simpleppt`.\" \"\\nInstall igraph", "Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model = model.copy() if not inplace", "Construct a principal tree model. Args: nodes: The nodes in", "key under which to add the nodes labels. inplace: Updates", "following properties: `model.point_data[key_added]`, the nodes labels array. \"\"\" from scipy.spatial", "nodes in the principal tree. edges: The edges between nodes", "labels array. \"\"\" from scipy.spatial import KDTree model = model.copy()", "changes_along_shape( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, key_added: Optional[str]", "**kwargs, ) -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a principal elastic", "= None, map_key: Union[str, list] = None, key_added: Optional[str] =", "key: Union[str, list], nodes_key: Optional[str] = \"nodes\", inplace: bool =", "principal tree model contains the nodes label. 
key: The key", "0).tolist(), mode=\"undirected\").get_edgelist() ) return nodes, edges def map_points_to_branch( model: Union[PolyData,", "\"\"\" model = model.copy() model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key =", "int = 2, inplace: bool = False, **kwargs, ): model", "elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes = elpi_tree[0][\"NodePositions\"]", "def construct_tree_model( nodes: np.ndarray, edges: np.ndarray, key_added: Optional[str] = \"nodes\",", "nodes in the principal tree. key_added: The key under which", "model. nodes: The nodes in the principal tree. spatial_key: The", "PPT approach. **kwargs: Other parameters used in simpleppt.ppt. For details,", "from typing import Literal except ImportError: from typing_extensions import Literal", "[key] if isinstance(key, str) else key for sub_key in key:", "inplace else tree tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data,", "Generate a simple principal tree. Reference: Mao et al. (2015),", "{ \"maxIter\": 10, \"sigma\": 0.001, \"gamma\": 10, \"eps\": 0, \"dim\":", "tree node to any point in the model through KDTree.", "spatial_key: Optional[str] = None, map_key: Union[str, list] = None, key_added:", "need to install the package `simpleppt` and `igraph`.\" \"\\nInstall simpleppt", "np.asarray(y), slices, line ################################# # Changes along the model shape", "= None, key_added: Optional[str] = \"rd_spatial\", dim: int = 2,", "int = 100, vec: Union[tuple, list] = (1, 0, 0),", "dim: int = 2, inplace: bool = False, **kwargs, ):", "**DDRTree_kwargs) # Obtain the real part of the complex argument", "principal tree algorithm, SIAM International Conference on Data Mining. Args:", "import igraph import simpleppt except ImportError: raise ImportError( \"You need", ") -> Tuple[np.ndarray, np.ndarray]: \"\"\" Generate a simple principal tree.", "None def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree: PolyData, key: Union[str,", "else key for sub_key in key: model_data[sub_key] = np.asarray(model[sub_key]) model_data", "install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs = { \"alpha\": 0.01, \"FinalEnergy\": \"Penalized\",", "tree. Reference: Mao et al. (2015), SimplePPT: A simple principal", "install the package `cupy`.\" \"\\nInstall cupy via `pip install cupy-cuda113`.\"", "= ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs) elif rd_method == \"SimplePPT\": nodes, edges", "need to install the package `cupy`.\" \"\\nInstall cupy via `pip", "= np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist() ) return nodes, edges", "line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3) x_length += ed x.append(x_length)", "- 1].points.flatten() point2 = line_points[point_i].points.flatten() ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3)", "nodes: np.ndarray, spatial_key: Optional[str] = None, key_added: Optional[str] = \"nodes\",", "100 to 2000 for PPT approach. **kwargs: Other parameters used", "model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, key_added: Optional[str] =", "array. \"\"\" model = model.copy() model_data = pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key", "\"\\nInstall simpleppt via `pip install -U simpleppt`.\" \"\\nInstall igraph via", "The nodes in the principal tree. 
edges: The edges between", "`cupy`.\" \"\\nInstall cupy via `pip install cupy-cuda113`.\" ) elpi_tree =", "the package `simpleppt` and `igraph`.\" \"\\nInstall simpleppt via `pip install", "k=1) model.point_data[key_added] = ii return model if not inplace else", "n_slices=n_points, vec=vec, center=center ) x, y = [], [] x_length", "ii = nodes_kdtree.query(np.asarray(X), k=1) model.point_data[key_added] = ii return model if", "three-dims principal tree model contains the nodes label. key: The", "inplace else None def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree: PolyData,", "nodes: np.ndarray, edges: np.ndarray, key_added: Optional[str] = \"nodes\", ) ->", "= elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights", "# #################################### def changes_along_line( model: Union[PolyData, UnstructuredGrid], key: Union[str, list]", "NumNodes: int = 50, inplace: bool = False, **kwargs, )", "key under which to add the nodes labels. Returns: A", "Optional[str] = None, key_added: Optional[str] = \"rd_spatial\", dim: int =", "the principal tree. \"\"\" try: import igraph import simpleppt except", "(1, 0, 0), center: Union[tuple, list] = None, ) ->", "Union import numpy as np import pandas as pd import", "R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs) # Obtain", "vector direction # #################################### def changes_along_line( model: Union[PolyData, UnstructuredGrid], key:", "please see: https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py Returns: nodes: The nodes in the principal", "key for sub_key in key: model_data[sub_key] = np.asarray(model[sub_key]) model_data =", "R) / R.sum(axis=0)).T B = ppt_tree.B edges = np.array( igraph.Graph.Adjacency((B", "elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs = { \"alpha\":", "model contains the nodes label. key: The key that corresponds", "= tree_data[sub_key].values return tree if not inplace else None def", "three_d_slice( model=model, method=\"line\", n_slices=n_points, vec=vec, center=center ) x, y =", "np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose() return nodes, edges def SimplePPT_tree( X: np.ndarray, NumNodes:", "X = model.points if spatial_key is None else model[spatial_key] if", "edges def SimplePPT_tree( X: np.ndarray, NumNodes: int = 50, **kwargs,", "if not (map_key is None): map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added,", "a vector direction # #################################### def changes_along_line( model: Union[PolyData, UnstructuredGrid],", "SIAM International Conference on Data Mining. Args: X: DxN, data", "Returns: A tree, which contains the following properties: `tree.point_data[key]`, the", "parameters used in scipy.spatial.KDTree. Returns: A model, which contains the", "that corresponds to the gene expression. nodes_key: The key that", "inplace: bool = False, **kwargs, ): model = model.copy() if", "def ElPiGraph_tree( X: np.ndarray, NumNodes: int = 50, **kwargs, )", "Optional[str] = \"nodes\", ) -> PolyData: \"\"\" Construct a principal", "Optional, Tuple, Union import numpy as np import pandas as", "sub_key in key: model_data[sub_key] = np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"]", "\"\"\" Construct a principal tree model. 
Args: nodes: The nodes", "inplace=True, ) tree_model = construct_tree_model(nodes=nodes, edges=edges) if not (map_key is", "construct_tree_model( nodes: np.ndarray, edges: np.ndarray, key_added: Optional[str] = \"nodes\", )", "['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights, 1)", "a principal elastic tree. Reference: Albergante et al. (2020), Robust", "nodes of the principal graph. Use a range of 10", "via `pip install -U simpleppt`.\" \"\\nInstall igraph via `pip install", "= construct_tree_model(nodes=nodes, edges=edges) if not (map_key is None): map_gene_to_branch( model=model,", "if not inplace else model X = model.points if spatial_key", "`elpigraph-python`.\" \"\\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs =", "via `pip install cupy-cuda113`.\" ) elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes,", "for slice, (point_i, point) in zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum()", "of the principal graph. Use a range of 100 to", "the branches # ############################## def ElPiGraph_tree( X: np.ndarray, NumNodes: int", "model in-place. Returns: A tree, which contains the following properties:", "pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data, model_data, how=\"outer\", on=\"nodes_id\") tree_data.fillna(value=0, inplace=True)", "NumNodes=NumNodes, **kwargs) elif rd_method == \"SimplePPT\": nodes, edges = SimplePPT_tree(X=X,", "not (map_key is None): map_gene_to_branch( model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True", "np.asarray(slice[key]).sum() y.append(change_value) if point_i == 0: x.append(0) else: point1 =", "DDRTree_kwargs = { \"maxIter\": 10, \"sigma\": 0.001, \"gamma\": 10, \"eps\":", "through KDTree. Args: model: A reconstruct model contains the gene", "Updates tree model in-place. Returns: A tree, which contains the", "\"\\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\" ) map_points_to_branch( model=model, nodes=nodes, spatial_key=spatial_key,", "a range of 10 to 100 for ElPiGraph approach. **kwargs:", ") -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]: model = model.copy() if", "need to install the package `elpigraph-python`.\" \"\\nInstall elpigraph-python via `pip", "= np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs) R = ppt_tree.R", "Literal except ImportError: from typing_extensions import Literal from .ddrtree import", "* X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y, stree, R,", "# ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights = np.triu(matrix_edges_weights,", "= \"rd_spatial\", dim: int = 2, inplace: bool = False,", "KDTree model = model.copy() if not inplace else model X", "of 100 to 2000 for PPT approach. **kwargs: Other parameters", "Literal from .ddrtree import DDRTree, cal_ncenter from .slice import euclidean_distance,", "al. 
(2020), Robust and Scalable Learning of Complex Intrinsic Dataset", "model.points if spatial_key is None else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes),", "elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes = elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k]", "vec: Union[tuple, list] = (1, 0, 0), center: Union[tuple, list]", "Z, Y, stree, R, W, Q, C, objs = DDRTree(X,", "nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs) else: raise ValueError( \"`rd_method`", "np import pandas as pd import pyvista as pv from", "import simpleppt except ImportError: raise ImportError( \"You need to install", "} SimplePPT_kwargs.update(kwargs) X = np.asarray(X) ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs)", "tree.copy() if not inplace else tree tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"])", "add the nodes labels. Returns: A three-dims principal tree model,", ") elpi_tree = elpigraph.computeElasticPrincipalTree( X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs ) nodes =", "100, vec: Union[tuple, list] = (1, 0, 0), center: Union[tuple,", "**SimplePPT_kwargs) R = ppt_tree.R nodes = (np.dot(X.T, R) / R.sum(axis=0)).T", "igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist() ) return nodes, edges def map_points_to_branch(", "edges_w_padding = np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding) tree_model.point_data[key_added] =", "edges=edges) if not (map_key is None): map_gene_to_branch( model=model, tree=tree_model, key=map_key,", "principal tree. \"\"\" try: import elpigraph except ImportError: raise ImportError(", "Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str] = None, key_added: Optional[str]", "model. If spatial_key is None, the coordinates are model.points. key_added:", "False, ): \"\"\" Find the closest principal tree node to", "The key that corresponds to the coordinates of the nodes", "labels. Returns: A three-dims principal tree model, which contains the", "else tree tree_data = pd.DataFrame(tree[nodes_key], columns=[\"nodes_id\"]) tree_data = pd.merge(tree_data, model_data,", "# Changes along the branches # ############################## def ElPiGraph_tree( X:", "if isinstance(key, str) else key for sub_key in key: model_data[sub_key]", "The edges between nodes in the principal tree. \"\"\" try:", "else None ############################## # Changes along the branches # ##############################", "Args: model: A reconstruct model contains the gene expression label.", "model: Union[PolyData, UnstructuredGrid], key: Union[str, list] = None, n_points: int", "point) in zip(slices, enumerate(line_points)): change_value = np.asarray(slice[key]).sum() y.append(change_value) if point_i", "nodes in the principal tree. \"\"\" try: import igraph import", "pv from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid try: from", "ValueError( \"`rd_method` value is wrong.\" \"\\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`.\"", "via `pip install -U igraph`\" ) SimplePPT_kwargs = { \"seed\":", "in key: model_data[sub_key] = np.asarray(model[sub_key]) model_data = model_data.groupby(by=\"nodes_id\").sum() model_data[\"nodes_id\"] =", "Union[tuple, list] = (1, 0, 0), center: Union[tuple, list] =", "the nodes label. key: The key that corresponds to the", "label. 
key: The key that corresponds to the gene expression.", "if not inplace else None ############################## # Changes along the", "def changes_along_branch( model: Union[PolyData, UnstructuredGrid], spatial_key: Optional[str] = None, map_key:", "False, } ElPiGraph_kwargs.update(kwargs) if ElPiGraph_kwargs[\"GPU\"] is True: try: import cupy", "elastic tree. Reference: Albergante et al. (2020), Robust and Scalable", "Albergante et al. (2020), Robust and Scalable Learning of Complex", "X = model.points if spatial_key is None else model[spatial_key] nodes_kdtree", "def map_gene_to_branch( model: Union[PolyData, UnstructuredGrid], tree: PolyData, key: Union[str, list],", "map_key: Union[str, list] = None, key_added: Optional[str] = \"nodes\", rd_method:", "except ImportError: raise ImportError( \"You need to install the package", "igraph via `pip install -U igraph`\" ) SimplePPT_kwargs = {", "elpi_tree[0][\"NodePositions\"] # ['AllNodePositions'][k] matrix_edges_weights = elpi_tree[0][\"ElasticMatrix\"] # ['AllElasticMatrices'][k] matrix_edges_weights =", "model through KDTree. Args: model: A reconstruct model. nodes: The", "`pip install git+https://github.com/j-bac/elpigraph-python.git`.\" ) ElPiGraph_kwargs = { \"alpha\": 0.01, \"FinalEnergy\":", "= pd.DataFrame(model[nodes_key], columns=[\"nodes_id\"]) key = [key] if isinstance(key, str) else", "pandas as pd import pyvista as pv from pyvista import", "2 padding[:] = 2 edges_w_padding = np.vstack((padding, edges.T)).T tree_model =", "spatial_key: Optional[str] = None, key_added: Optional[str] = \"rd_spatial\", dim: int", "Changes along a vector direction # #################################### def changes_along_line( model:", "None else model[spatial_key] nodes_kdtree = KDTree(np.asarray(nodes), **kwargs) _, ii =", "key_added: Optional[str] = \"nodes\", rd_method: Literal[\"ElPiGraph\", \"SimplePPT\"] = \"ElPiGraph\", NumNodes:", "spatial_key=spatial_key, key_added=key_added, inplace=True, ) tree_model = construct_tree_model(nodes=nodes, edges=edges) if not", "= 2 edges_w_padding = np.vstack((padding, edges.T)).T tree_model = pv.PolyData(nodes, edges_w_padding)", "X.shape[1], \"ncenter\": cal_ncenter(X.shape[1]), } DDRTree_kwargs.update(kwargs) Z, Y, stree, R, W,", "np.ndarray]: \"\"\" Generate a principal elastic tree. Reference: Albergante et", "Mining. Args: X: DxN, data matrix list. NumNodes: The number", "A reconstruct model. nodes: The nodes in the principal tree.", "np.array( igraph.Graph.Adjacency((B > 0).tolist(), mode=\"undirected\").get_edgelist() ) return nodes, edges def", "label. tree: A three-dims principal tree model contains the nodes", "model: Union[PolyData, UnstructuredGrid], nodes: np.ndarray, spatial_key: Optional[str] = None, key_added:", "branches # ############################## def ElPiGraph_tree( X: np.ndarray, NumNodes: int =", "else model X = model.points if spatial_key is None else" ]
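Taken together, `changes_along_branch` is the entry point: it learns the principal tree, labels every point with its nearest node, and optionally pushes expression values onto the tree. A minimal usage sketch follows; the toy point cloud and the "gene_x" array are illustrative placeholders rather than part of the module above, and `elpigraph-python` must be installed for the default `rd_method`.

# Illustrative usage sketch only; the data below is synthetic.
import numpy as np
import pyvista as pv

cloud = pv.PolyData(np.random.rand(500, 3))        # toy 3D point cloud
cloud.point_data["gene_x"] = np.random.rand(500)   # hypothetical expression values

labeled_model, tree = changes_along_branch(
    model=cloud,
    map_key="gene_x",        # map this expression array onto the tree nodes
    rd_method="ElPiGraph",   # or "SimplePPT"
    NumNodes=30,
)
# labeled_model.point_data["nodes"]: nearest tree node per point
# tree.point_data["gene_x"]: expression summed per tree node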
[ "is equal old list with added group\"): new_groups = db.get_group_list()", "a group list\"): old_groups = db.get_group_list() #with pytest.allure.step(\"When I add", "1 #with pytest.allure.step(\"When the new groups list is equal old", "assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if check_ui: print(\"CHECK_UI\") assert", "<filename>test/test_add_group.py<gh_stars>0 # -*- coding: utf-8 -*- from model.group import Group", "import Group import pytest import allure_pytest def test_add_group(app, db, check_ui,", "json_groups): group0 = json_groups #with pytest.allure.step(\"Given a group list\"): old_groups", "old_groups = db.get_group_list() #with pytest.allure.step(\"When I add a group %s", "= db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if", "key=Group.id_or_max) if check_ui: print(\"CHECK_UI\") assert sorted(new_groups, key=Group.id_or_max) == \\ sorted(app.group.get_groups_list(),", "group0 = json_groups #with pytest.allure.step(\"Given a group list\"): old_groups =", "list is equal old list with added group\"): new_groups =", "json_groups #with pytest.allure.step(\"Given a group list\"): old_groups = db.get_group_list() #with", "check_ui, json_groups): group0 = json_groups #with pytest.allure.step(\"Given a group list\"):", "add a group %s to the list\" % group0): app.group.create(group0)", "import allure_pytest def test_add_group(app, db, check_ui, json_groups): group0 = json_groups", "I add a group %s to the list\" % group0):", "%s to the list\" % group0): app.group.create(group0) #assert app.group.count() ==", "group0): app.group.create(group0) #assert app.group.count() == len(old_groups) + 1 #with pytest.allure.step(\"When", "pytest.allure.step(\"When the new groups list is equal old list with", "list\"): old_groups = db.get_group_list() #with pytest.allure.step(\"When I add a group", "len(old_groups) + 1 #with pytest.allure.step(\"When the new groups list is", "test_add_group(app, db, check_ui, json_groups): group0 = json_groups #with pytest.allure.step(\"Given a", "utf-8 -*- from model.group import Group import pytest import allure_pytest", "coding: utf-8 -*- from model.group import Group import pytest import", "-*- coding: utf-8 -*- from model.group import Group import pytest", "to the list\" % group0): app.group.create(group0) #assert app.group.count() == len(old_groups)", "new groups list is equal old list with added group\"):", "list with added group\"): new_groups = db.get_group_list() old_groups.append(group0) assert sorted(old_groups,", "if check_ui: print(\"CHECK_UI\") assert sorted(new_groups, key=Group.id_or_max) == \\ sorted(app.group.get_groups_list(), key=Group.id_or_max)", "old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if check_ui: print(\"CHECK_UI\")", "-*- from model.group import Group import pytest import allure_pytest def", "def test_add_group(app, db, check_ui, json_groups): group0 = json_groups #with pytest.allure.step(\"Given", "pytest.allure.step(\"Given a group list\"): old_groups = db.get_group_list() #with pytest.allure.step(\"When I", "= json_groups #with pytest.allure.step(\"Given a group list\"): old_groups = db.get_group_list()", "db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if check_ui:", "= db.get_group_list() #with pytest.allure.step(\"When I add a group %s 
to", "group %s to the list\" % group0): app.group.create(group0) #assert app.group.count()", "== sorted(new_groups, key=Group.id_or_max) if check_ui: print(\"CHECK_UI\") assert sorted(new_groups, key=Group.id_or_max) ==", "equal old list with added group\"): new_groups = db.get_group_list() old_groups.append(group0)", "db.get_group_list() #with pytest.allure.step(\"When I add a group %s to the", "group list\"): old_groups = db.get_group_list() #with pytest.allure.step(\"When I add a", "model.group import Group import pytest import allure_pytest def test_add_group(app, db,", "new_groups = db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)", "list\" % group0): app.group.create(group0) #assert app.group.count() == len(old_groups) + 1", "the list\" % group0): app.group.create(group0) #assert app.group.count() == len(old_groups) +", "the new groups list is equal old list with added", "old list with added group\"): new_groups = db.get_group_list() old_groups.append(group0) assert", "group\"): new_groups = db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups,", "a group %s to the list\" % group0): app.group.create(group0) #assert", "db, check_ui, json_groups): group0 = json_groups #with pytest.allure.step(\"Given a group", "sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if check_ui: print(\"CHECK_UI\") assert sorted(new_groups,", "from model.group import Group import pytest import allure_pytest def test_add_group(app,", "+ 1 #with pytest.allure.step(\"When the new groups list is equal", "key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max) if check_ui: print(\"CHECK_UI\") assert sorted(new_groups, key=Group.id_or_max)", "#with pytest.allure.step(\"When I add a group %s to the list\"", "sorted(new_groups, key=Group.id_or_max) if check_ui: print(\"CHECK_UI\") assert sorted(new_groups, key=Group.id_or_max) == \\", "added group\"): new_groups = db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max) ==", "#assert app.group.count() == len(old_groups) + 1 #with pytest.allure.step(\"When the new", "#with pytest.allure.step(\"When the new groups list is equal old list", "app.group.count() == len(old_groups) + 1 #with pytest.allure.step(\"When the new groups", "# -*- coding: utf-8 -*- from model.group import Group import", "import pytest import allure_pytest def test_add_group(app, db, check_ui, json_groups): group0", "allure_pytest def test_add_group(app, db, check_ui, json_groups): group0 = json_groups #with", "#with pytest.allure.step(\"Given a group list\"): old_groups = db.get_group_list() #with pytest.allure.step(\"When", "app.group.create(group0) #assert app.group.count() == len(old_groups) + 1 #with pytest.allure.step(\"When the", "with added group\"): new_groups = db.get_group_list() old_groups.append(group0) assert sorted(old_groups, key=Group.id_or_max)", "== len(old_groups) + 1 #with pytest.allure.step(\"When the new groups list", "Group import pytest import allure_pytest def test_add_group(app, db, check_ui, json_groups):", "pytest import allure_pytest def test_add_group(app, db, check_ui, json_groups): group0 =", "% group0): app.group.create(group0) #assert app.group.count() == len(old_groups) + 1 #with", "groups list is equal old list with added group\"): new_groups", "pytest.allure.step(\"When I add a group %s to the list\" %" ]
[ "fake implementation. Each node in the tree represents a frame", "caller frame. Nodes are also indexed by frames' physical location", "__future__ import annotations from .frame import Frame from .generated.communication_pb2 import", "\"\"\" Right now it's a fake implementation, where we return", "connected. Call order is preserved among callee frames of the", "- Add indexes. - Implement frame search. \"\"\" # Keyed", "return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id) -> Frame: assert cls.frames", "exists during program execution. Caller and callee frames are connected.", "find_frames(cls, position: CursorPosition) -> list[Frame]: \"\"\" Right now it's a", "frame. Nodes are also indexed by frames' physical location (file", "implementation. Each node in the tree represents a frame that", "it's a fake implementation, where we return the only existing", "store all frames. For now it's a fake implementation. Each", ".frame import Frame from .generated.communication_pb2 import CursorPosition class FrameTree: \"\"\"A", "cls.frames return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id) -> Frame: assert", "def find_frames(cls, position: CursorPosition) -> list[Frame]: \"\"\" Right now it's", "now it's a fake implementation, where we return the only", "frame that ever exists during program execution. Caller and callee", "implementation, where we return the only existing frame. \"\"\" assert", "the same caller frame. Nodes are also indexed by frames'", "among callee frames of the same caller frame. Nodes are", "-> list[Frame]: \"\"\" Right now it's a fake implementation, where", "- Implement frame search. \"\"\" # Keyed by frame ID.", "FrameTree: \"\"\"A tree to store all frames. For now it's", "Caller and callee frames are connected. Call order is preserved", ".generated.communication_pb2 import CursorPosition class FrameTree: \"\"\"A tree to store all", "of the same caller frame. Nodes are also indexed by", "@classmethod def add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id] = frame print(frame_id,", "by frame ID. frames: dict[str, Frame] = dict() @classmethod def", "callee frames of the same caller frame. Nodes are also", "add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id] = frame print(frame_id, frame) @classmethod", "import Frame from .generated.communication_pb2 import CursorPosition class FrameTree: \"\"\"A tree", "a frame that ever exists during program execution. Caller and", "that ever exists during program execution. Caller and callee frames", "Frame from .generated.communication_pb2 import CursorPosition class FrameTree: \"\"\"A tree to", "CursorPosition class FrameTree: \"\"\"A tree to store all frames. For", "Keyed by frame ID. frames: dict[str, Frame] = dict() @classmethod", "order is preserved among callee frames of the same caller", "frame print(frame_id, frame) @classmethod def find_frames(cls, position: CursorPosition) -> list[Frame]:", "tree to store all frames. For now it's a fake", "physical location (file name, line range). TODO: - Add indexes.", "def add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id] = frame print(frame_id, frame)", "during program execution. Caller and callee frames are connected. Call", "and callee frames are connected. Call order is preserved among", "return the only existing frame. \"\"\" assert cls.frames return [next(iter(cls.frames.values()))]", "indexed by frames' physical location (file name, line range). 
TODO:", "assert cls.frames return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id) -> Frame:", "same caller frame. Nodes are also indexed by frames' physical", "are also indexed by frames' physical location (file name, line", "annotations from .frame import Frame from .generated.communication_pb2 import CursorPosition class", "# Keyed by frame ID. frames: dict[str, Frame] = dict()", "Frame] = dict() @classmethod def add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id]", "frame) @classmethod def find_frames(cls, position: CursorPosition) -> list[Frame]: \"\"\" Right", "where we return the only existing frame. \"\"\" assert cls.frames", "import annotations from .frame import Frame from .generated.communication_pb2 import CursorPosition", "frames of the same caller frame. Nodes are also indexed", "location (file name, line range). TODO: - Add indexes. -", "a fake implementation, where we return the only existing frame.", "fake implementation, where we return the only existing frame. \"\"\"", "we return the only existing frame. \"\"\" assert cls.frames return", "(file name, line range). TODO: - Add indexes. - Implement", "= frame print(frame_id, frame) @classmethod def find_frames(cls, position: CursorPosition) ->", "program execution. Caller and callee frames are connected. Call order", "frame. \"\"\" assert cls.frames return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id)", "\"\"\" assert cls.frames return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id) ->", "frames: dict[str, Frame] = dict() @classmethod def add_frame(cls, frame_id, frame:", "dict() @classmethod def add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id] = frame", "represents a frame that ever exists during program execution. Caller", "frames are connected. Call order is preserved among callee frames", "callee frames are connected. Call order is preserved among callee", "existing frame. \"\"\" assert cls.frames return [next(iter(cls.frames.values()))] @classmethod def get_frame(cls,", "For now it's a fake implementation. Each node in the", "only existing frame. \"\"\" assert cls.frames return [next(iter(cls.frames.values()))] @classmethod def", "cls.frames[frame_id] = frame print(frame_id, frame) @classmethod def find_frames(cls, position: CursorPosition)", "\"\"\" # Keyed by frame ID. frames: dict[str, Frame] =", "class FrameTree: \"\"\"A tree to store all frames. For now", "@classmethod def find_frames(cls, position: CursorPosition) -> list[Frame]: \"\"\" Right now", "from __future__ import annotations from .frame import Frame from .generated.communication_pb2", "Right now it's a fake implementation, where we return the", "frame_id, frame: Frame): cls.frames[frame_id] = frame print(frame_id, frame) @classmethod def", "range). TODO: - Add indexes. - Implement frame search. \"\"\"", "[next(iter(cls.frames.values()))] @classmethod def get_frame(cls, frame_id) -> Frame: assert cls.frames return", "to store all frames. For now it's a fake implementation.", "by frames' physical location (file name, line range). TODO: -", "frame: Frame): cls.frames[frame_id] = frame print(frame_id, frame) @classmethod def find_frames(cls,", "frame search. \"\"\" # Keyed by frame ID. frames: dict[str,", "in the tree represents a frame that ever exists during", "a fake implementation. 
Each node in the tree represents a", "node in the tree represents a frame that ever exists", "Frame): cls.frames[frame_id] = frame print(frame_id, frame) @classmethod def find_frames(cls, position:", "from .generated.communication_pb2 import CursorPosition class FrameTree: \"\"\"A tree to store", "Implement frame search. \"\"\" # Keyed by frame ID. frames:", "import CursorPosition class FrameTree: \"\"\"A tree to store all frames.", "now it's a fake implementation. Each node in the tree", "line range). TODO: - Add indexes. - Implement frame search.", "Call order is preserved among callee frames of the same", "frames' physical location (file name, line range). TODO: - Add", "frame ID. frames: dict[str, Frame] = dict() @classmethod def add_frame(cls,", "ID. frames: dict[str, Frame] = dict() @classmethod def add_frame(cls, frame_id,", "from .frame import Frame from .generated.communication_pb2 import CursorPosition class FrameTree:", "name, line range). TODO: - Add indexes. - Implement frame", "CursorPosition) -> list[Frame]: \"\"\" Right now it's a fake implementation,", "Each node in the tree represents a frame that ever", "position: CursorPosition) -> list[Frame]: \"\"\" Right now it's a fake", "also indexed by frames' physical location (file name, line range).", "frames. For now it's a fake implementation. Each node in", "search. \"\"\" # Keyed by frame ID. frames: dict[str, Frame]", "are connected. Call order is preserved among callee frames of", "print(frame_id, frame) @classmethod def find_frames(cls, position: CursorPosition) -> list[Frame]: \"\"\"", "the tree represents a frame that ever exists during program", "preserved among callee frames of the same caller frame. Nodes", "= dict() @classmethod def add_frame(cls, frame_id, frame: Frame): cls.frames[frame_id] =", "indexes. - Implement frame search. \"\"\" # Keyed by frame", "@classmethod def get_frame(cls, frame_id) -> Frame: assert cls.frames return cls.frames[frame_id]", "\"\"\"A tree to store all frames. For now it's a", "TODO: - Add indexes. - Implement frame search. \"\"\" #", "Add indexes. - Implement frame search. \"\"\" # Keyed by", "ever exists during program execution. Caller and callee frames are", "list[Frame]: \"\"\" Right now it's a fake implementation, where we", "the only existing frame. \"\"\" assert cls.frames return [next(iter(cls.frames.values()))] @classmethod", "execution. Caller and callee frames are connected. Call order is", "dict[str, Frame] = dict() @classmethod def add_frame(cls, frame_id, frame: Frame):", "all frames. For now it's a fake implementation. Each node", "Nodes are also indexed by frames' physical location (file name,", "it's a fake implementation. Each node in the tree represents", "is preserved among callee frames of the same caller frame.", "tree represents a frame that ever exists during program execution." ]
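The row ending here shingles a small `FrameTree` helper module. Purely as a readability aid, one possible reconstruction stitched together from those overlapping 10-grams is shown below; the identifiers, docstrings, and statements come from the row itself, while the indentation, blank lines, and docstring layout are assumptions (the shingles preserve no whitespace structure), and the relative imports are kept as given, so the snippet only runs inside its original package.

```python
from __future__ import annotations

from .frame import Frame
from .generated.communication_pb2 import CursorPosition


class FrameTree:
    """A tree to store all frames.

    For now it's a fake implementation. Each node in the tree represents a
    frame that ever exists during program execution. Caller and callee frames
    are connected. Call order is preserved among callee frames of the same
    caller frame. Nodes are also indexed by frames' physical location
    (file name, line range).

    TODO:
        - Add indexes.
        - Implement frame search.
    """

    # Keyed by frame ID.
    frames: dict[str, Frame] = dict()

    @classmethod
    def add_frame(cls, frame_id, frame: Frame):
        cls.frames[frame_id] = frame
        print(frame_id, frame)

    @classmethod
    def find_frames(cls, position: CursorPosition) -> list[Frame]:
        """
        Right now it's a fake implementation, where we return the only
        existing frame.
        """
        assert cls.frames
        return [next(iter(cls.frames.values()))]

    @classmethod
    def get_frame(cls, frame_id) -> Frame:
        assert cls.frames
        return cls.frames[frame_id]
```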
[ "little benefit. (Default: ``False``). .. attribute:: param_sl *CharField*: The level", "import b64decode from binascii import hexlify, unhexlify from struct import", "try: public_id, otp = decode_otp(token, self.bin_key) except Exception: return False", "= False if token[:-32] == self.public_id: client = self.service.get_client() response", "device. .. attribute:: counter *PositiveIntegerField*: The volatile session usage counter", "optional but strongly recommended. (Default: ``''``) .. attribute:: base_url *URLField*:", "attribute:: api_id *IntegerField*: Your API ID. The server needs this", "models.CharField( max_length=64, blank=True, default='', verbose_name=\"API key\", help_text=\"Your base64-encoded API key.\"", "== '1.1': client = YubiClient11(self.api_id, api_key, self.use_ssl) else: client =", "hex_validator(6)(value) def default_key(): return force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value) class", "at any other service implementing the same protocol. .. attribute::", "verbose_name=\"SL\", help_text=\"The level of syncing required.\" ) param_timeout = models.CharField(", "@property def bin_key(self): return unhexlify(self.key.encode()) def verify_token(self, token): if isinstance(token,", "hosted API. .. attribute:: api_version *CharField*: The version of the", "API_VERSIONS = ['1.0', '1.1', '2.0'] name = models.CharField( max_length=32, help_text=\"The", "import absolute_import, division, print_function, unicode_literals from base64 import b64decode from", "not verify certificates, this provides little benefit. (Default: ``False``). ..", "use for this device. .. attribute:: public_id *CharField*: The public", "return False if hexlify(otp.uid) != self.private_id.encode(): return False if otp.session", "bin_key(self): return unhexlify(self.key.encode()) def verify_token(self, token): if isinstance(token, six.text_type): token", "same protocol. .. attribute:: name *CharField*: The name of this", "otp.session < self.session: return False if (otp.session == self.session) and", ".. attribute:: param_sl *CharField*: The level of syncing required. See", "the validation api to use.\" ) use_ssl = models.BooleanField( default=False,", "def bin_key(self): return unhexlify(self.key.encode()) def verify_token(self, token): if isinstance(token, six.text_type):", "a locally-verified YubiKey OTP :class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*: The", "locally-verified YubiKey OTP :class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*: The 6-byte", "YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url: client.base_url = self.base_url return client", "= models.IntegerField( default=1, verbose_name=\"API ID\", help_text=\"Your API ID.\" ) api_key", "verbose_name=\"API key\", help_text=\"Your base64-encoded API key.\" ) base_url = models.URLField(", "client = YubiClient11(self.api_id, api_key, self.use_ssl) else: client = YubiClient10(self.api_id, api_key,", "In order create these devices, you must have at least", "self.counter = otp.counter self.save() return True class ValidationService(models.Model): \"\"\" Represents", "YubiKey OTP :class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*: The 6-byte private", "return hex_validator(6)(value) def default_key(): return force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value)", "base64-encoded API key, used to sign requests. This is optional", "(hex-encoded). .. 
attribute:: session *PositiveIntegerField*: The non-volatile session counter most", "volatile session usage counter most recently used by this device.", "key shared with this YubiKey (hex-encoded).\" ) session = models.PositiveIntegerField(", "volatile session usage counter most recently used by this device.\"", "default?\" ) param_sl = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"SL\", help_text=\"The", "if self.api_version == '2.0': client = YubiClient20(self.api_id, api_key, self.use_ssl, False,", ") api_id = models.IntegerField( default=1, verbose_name=\"API ID\", help_text=\"Your API ID.\"", "remote validation service. In order create these devices, you must", "= otp.session self.counter = otp.counter self.save() return True class ValidationService(models.Model):", "counter most recently used by this device. \"\"\" private_id =", "return force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value) def default_key(): return force_text(random_hex(16))", "shared with this YubiKey (hex-encoded).\" ) session = models.PositiveIntegerField( default=0,", "self.id)) public_id.short_description = 'Public Identity' public_id.admin_order_field = 'id' @property def", "*CharField*: The 16-byte AES key shared with this YubiKey (hex-encoded).", "used to sign requests. This is optional but strongly recommended.", "time to allow for syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS =", "= models.BooleanField( default=False, verbose_name=\"Use SSL\", help_text=\"Use HTTPS API URLs by", "YubiKey device that is to be verified with a remote", "device.\" ) counter = models.PositiveIntegerField( default=0, help_text=\"The volatile session usage", "class Meta(Device.Meta): verbose_name = \"Remote YubiKey device\" def verify_token(self, token):", "hosted service, which you can customize. You can also create", "device. .. attribute:: public_id *CharField*: The public identity of the", "use the HTTPS versions of the default URLs. Because :mod:`urllib2`", "The version of the validation API to use: '1.0', '1.1',", "attribute:: private_id *CharField*: The 6-byte private ID (hex-encoded). .. attribute::", "class Meta(Device.Meta): verbose_name = \"Local YubiKey device\" def public_id(self): \"\"\"", "six from django.db import models from django.utils.encoding import force_text from", "private ID (hex-encoded). .. attribute:: key *CharField*: The 16-byte AES", "< self.session: return False if (otp.session == self.session) and (otp.counter", "counters and return the good news. self.session = otp.session self.counter", "django_otp.models import Device from django_otp.util import hex_validator, random_hex from yubiotp.client", "level of syncing required. See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*:", "you must have at least one :class:`~otp_yubikey.models.ValidationService` in the database.", "models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The public identity", ".. attribute:: name *CharField*: The name of this validation service.", "'2.0'. (Default: '2.0') .. attribute:: use_ssl *BooleanField*: If ``True``, we'll", "have at least one :class:`~otp_yubikey.models.ValidationService` in the database. .. attribute::", "(otp.counter <= self.counter): return False # All tests pass. 
Update", "import six from django.db import models from django.utils.encoding import force_text", "from yubiotp.client import YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex import modhex", "of the verification service. Defaults to Yubico's hosted API.\" )", "get_client(self): api_key = b64decode(self.api_key.encode()) or None if self.api_version == '2.0':", "'1.1': client = YubiClient11(self.api_id, api_key, self.use_ssl) else: client = YubiClient10(self.api_id,", "attribute:: service *ForeignKey*: The validation service to use for this", "non-volatile session counter most recently used by this device.\" )", "Represents a locally-verified YubiKey OTP :class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*:", "self.param_sl or None, self.param_timeout or None) elif self.api_version == '1.1':", ".. attribute:: api_version *CharField*: The version of the validation API", "be verified with a remote validation service. In order create", "recommended. (Default: ``''``) .. attribute:: base_url *URLField*: The base URL", "token[:-32] == self.public_id: client = self.service.get_client() response = client.verify(token) verified", "validation web service. By default, this will point to Yubico's", "this provides little benefit. (Default: ``False``). .. attribute:: param_sl *CharField*:", "used by this device.\" ) class Meta(Device.Meta): verbose_name = \"Local", "1) .. attribute:: api_key *CharField*: Your base64-encoded API key, used", "service\" def __unicode__(self): return self.name def get_client(self): api_key = b64decode(self.api_key.encode())", "(hex-encoded).\" ) key = models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text=\"The 16-byte", "self.session = otp.session self.counter = otp.counter self.save() return True class", "force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device): \"\"\" Represents a", "level of syncing required.\" ) param_timeout = models.CharField( max_length=16, blank=True,", "api_version *CharField*: The version of the validation API to use:", "``False``). .. attribute:: param_sl *CharField*: The level of syncing required.", "the YubiKey (modhex-encoded).\") class Meta(Device.Meta): verbose_name = \"Remote YubiKey device\"", "The name of this validation service. .. attribute:: api_id *IntegerField*:", "default=False, verbose_name=\"Use SSL\", help_text=\"Use HTTPS API URLs by default?\" )", "attribute:: key *CharField*: The 16-byte AES key shared with this", "None, self.param_timeout or None) elif self.api_version == '1.1': client =", "*CharField*: The name of this validation service. .. attribute:: api_id", "with a remote validation service. In order create these devices,", "attribute:: session *PositiveIntegerField*: The non-volatile session counter most recently used", "Meta(Device.Meta): verbose_name = \"Remote YubiKey device\" def verify_token(self, token): verified", "from binascii import hexlify, unhexlify from struct import pack import", "on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The public identity of", ":class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*: The 6-byte private ID (hex-encoded).", "return False if otp.session < self.session: return False if (otp.session", "Yubico's official hosted service, which you can customize. 
You can", "default=default_key, help_text=\"The 16-byte AES key shared with this YubiKey (hex-encoded).\"", "\"\"\" The public ID of this device is the four-byte,", "\"\"\" Represents a YubiKey device that is to be verified", "The server needs this to sign responsees. (Default: 1) ..", "This is optional but strongly recommended. (Default: ``''``) .. attribute::", "== self.session) and (otp.counter <= self.counter): return False # All", "of this validation service.\" ) api_id = models.IntegerField( default=1, verbose_name=\"API", "YubiKey (modhex-encoded). \"\"\" service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32,", "this device. \"\"\" private_id = models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private", "news. self.session = otp.session self.counter = otp.counter self.save() return True", "database. .. attribute:: service *ForeignKey*: The validation service to use", "HTTPS API URLs by default?\" ) param_sl = models.CharField( max_length=16,", "help_text=\"Your base64-encoded API key.\" ) base_url = models.URLField( blank=True, default='',", "Represents a YubiKey validation web service. By default, this will", "service. In order create these devices, you must have at", "max_length=32, help_text=\"The name of this validation service.\" ) api_id =", "= b64decode(self.api_key.encode()) or None if self.api_version == '2.0': client =", "verified with a remote validation service. In order create these", "\"Local YubiKey device\" def public_id(self): \"\"\" The public ID of", "attribute:: public_id *CharField*: The public identity of the YubiKey (modhex-encoded).", "identity of the YubiKey (modhex-encoded). \"\"\" service = models.ForeignKey(ValidationService, on_delete=models.CASCADE)", "decode_otp def default_id(): return force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value) def", "usage counter most recently used by this device. \"\"\" private_id", "== self.public_id: client = self.service.get_client() response = client.verify(token) verified =", "ID of this device is the four-byte, big-endian, modhex-encoded primary", "= models.URLField( blank=True, default='', verbose_name=\"Base URL\", help_text=\"The base URL of", "session *PositiveIntegerField*: The non-volatile session counter most recently used by", "Your API ID. The server needs this to sign responsees.", "usage counter most recently used by this device.\" ) class", "self.api_version == '1.1': client = YubiClient11(self.api_id, api_key, self.use_ssl) else: client", "by this device. \"\"\" private_id = models.CharField( max_length=12, validators=[id_validator], default=default_id,", "must have at least one :class:`~otp_yubikey.models.ValidationService` in the database. ..", "*CharField*: The level of syncing required. See :class:`~yubiotp.client.YubiClient20`. .. attribute::", "*PositiveIntegerField*: The volatile session usage counter most recently used by", "YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or None)", "The 6-byte private ID (hex-encoded). .. attribute:: key *CharField*: The", "that is to be verified with a remote validation service.", "default='', verbose_name=\"Base URL\", help_text=\"The base URL of the verification service.", "is to be verified with a remote validation service. In", "Defaults to Yubico's hosted API. .. 
attribute:: api_version *CharField*: The", "version of the validation API to use: '1.0', '1.1', or", "if self.base_url: client.base_url = self.base_url return client class RemoteYubikeyDevice(Device): \"\"\"", "False if otp.session < self.session: return False if (otp.session ==", "device\" def public_id(self): \"\"\" The public ID of this device", "you can customize. You can also create instances to point", ":mod:`urllib2` does not verify certificates, this provides little benefit. (Default:", "API key, used to sign requests. This is optional but", "See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS = ['1.0', '1.1', '2.0'] name =", "create these devices, you must have at least one :class:`~otp_yubikey.models.ValidationService`", "or None, self.param_timeout or None) elif self.api_version == '1.1': client", "6-byte private ID (hex-encoded). .. attribute:: key *CharField*: The 16-byte", "point to Yubico's official hosted service, which you can customize.", ".. attribute:: counter *PositiveIntegerField*: The volatile session usage counter most", "of the YubiKey (modhex-encoded).\") class Meta(Device.Meta): verbose_name = \"Remote YubiKey", "token = token.encode('utf-8') try: public_id, otp = decode_otp(token, self.bin_key) except", "API ID.\" ) api_key = models.CharField( max_length=64, blank=True, default='', verbose_name=\"API", "param_timeout *CharField*: The time to allow for syncing. See :class:`~yubiotp.client.YubiClient20`.", "!= self.public_id(): return False if hexlify(otp.uid) != self.private_id.encode(): return False", ":class:`~otp_yubikey.models.ValidationService` in the database. .. attribute:: service *ForeignKey*: The validation", "a remote validation service. In order create these devices, you", "decode_otp(token, self.bin_key) except Exception: return False if public_id != self.public_id():", "the verification service. Defaults to Yubico's hosted API.\" ) api_version", "The validation service to use for this device. .. attribute::", "api_key, self.use_ssl) else: client = YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url:", "force_text from django_otp.models import Device from django_otp.util import hex_validator, random_hex", "versions of the default URLs. Because :mod:`urllib2` does not verify", "import modhex from yubiotp.otp import decode_otp def default_id(): return force_text(random_hex(6))", "elif self.api_version == '1.1': client = YubiClient11(self.api_id, api_key, self.use_ssl) else:", "YubiKey (modhex-encoded).\") class Meta(Device.Meta): verbose_name = \"Remote YubiKey device\" def", "strongly recommended. (Default: ``''``) .. attribute:: base_url *URLField*: The base", "The time to allow for syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS", "import hex_validator, random_hex from yubiotp.client import YubiClient10, YubiClient11, YubiClient20 from", "to use for this device. .. attribute:: public_id *CharField*: The", "API. .. attribute:: api_version *CharField*: The version of the validation", "False if hexlify(otp.uid) != self.private_id.encode(): return False if otp.session <", "verification service. Defaults to Yubico's hosted API. .. attribute:: api_version", "help_text=\"Your API ID.\" ) api_key = models.CharField( max_length=64, blank=True, default='',", "responsees. (Default: 1) .. 
attribute:: api_key *CharField*: Your base64-encoded API", "binascii import hexlify, unhexlify from struct import pack import six", "verified = False if token[:-32] == self.public_id: client = self.service.get_client()", "syncing required.\" ) param_timeout = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"Timeout\",", "= self.base_url return client class RemoteYubikeyDevice(Device): \"\"\" Represents a YubiKey", "to Yubico's hosted API.\" ) api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS,", "api_key, self.use_ssl) if self.base_url: client.base_url = self.base_url return client class", "16-byte AES key shared with this YubiKey (hex-encoded). .. attribute::", "16-byte AES key shared with this YubiKey (hex-encoded).\" ) session", "service *ForeignKey*: The validation service to use for this device.", "def key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device): \"\"\" Represents a locally-verified", "verbose_name=\"Private ID\", help_text=\"The 6-byte private ID (hex-encoded).\" ) key =", "of the verification service. Defaults to Yubico's hosted API. ..", "models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The public identity of the YubiKey (modhex-encoded).\")", "api_key *CharField*: Your base64-encoded API key, used to sign requests.", "if otp.session < self.session: return False if (otp.session == self.session)", "URL of the verification service. Defaults to Yubico's hosted API.", "YubiKey device\" def public_id(self): \"\"\" The public ID of this", "Meta(object): verbose_name = \"YubiKey validation service\" def __unicode__(self): return self.name", "from django_otp.models import Device from django_otp.util import hex_validator, random_hex from", "<= self.counter): return False # All tests pass. Update the", "this will point to Yubico's official hosted service, which you", "The volatile session usage counter most recently used by this", "self.bin_key) except Exception: return False if public_id != self.public_id(): return", "= models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The public identity of the YubiKey", "models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text=\"The 16-byte AES key shared with", "SSL\", help_text=\"Use HTTPS API URLs by default?\" ) param_sl =", "models.CharField( max_length=16, blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The time to allow for", "attribute:: base_url *URLField*: The base URL of the verification service.", "certificates, this provides little benefit. (Default: ``False``). .. attribute:: param_sl", "verbose_name=\"API ID\", help_text=\"Your API ID.\" ) api_key = models.CharField( max_length=64,", "api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The version of", "*CharField*: The public identity of the YubiKey (modhex-encoded). \"\"\" service", "public identity of the YubiKey (modhex-encoded).\") class Meta(Device.Meta): verbose_name =", "most recently used by this device.\" ) counter = models.PositiveIntegerField(", "if isinstance(token, six.text_type): token = token.encode('utf-8') try: public_id, otp =", "needs this to sign responsees. (Default: 1) .. attribute:: api_key", "YubiKey (hex-encoded).\" ) session = models.PositiveIntegerField( default=0, help_text=\"The non-volatile session", ".. 
attribute:: use_ssl *BooleanField*: If ``True``, we'll use the HTTPS", "import decode_otp def default_id(): return force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value)", "import models from django.utils.encoding import force_text from django_otp.models import Device", "non-volatile session counter most recently used by this device. ..", "service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The", "\"\"\" API_VERSIONS = ['1.0', '1.1', '2.0'] name = models.CharField( max_length=32,", "to Yubico's hosted API. .. attribute:: api_version *CharField*: The version", "self.public_id: client = self.service.get_client() response = client.verify(token) verified = response.is_ok()", ") counter = models.PositiveIntegerField( default=0, help_text=\"The volatile session usage counter", "otp.session self.counter = otp.counter self.save() return True class ValidationService(models.Model): \"\"\"", "use_ssl = models.BooleanField( default=False, verbose_name=\"Use SSL\", help_text=\"Use HTTPS API URLs", "return False # All tests pass. Update the counters and", "models.PositiveIntegerField( default=0, help_text=\"The volatile session usage counter most recently used", "private_id *CharField*: The 6-byte private ID (hex-encoded). .. attribute:: key", "otp.counter self.save() return True class ValidationService(models.Model): \"\"\" Represents a YubiKey", "'2.0') .. attribute:: use_ssl *BooleanField*: If ``True``, we'll use the", "self.base_url return client class RemoteYubikeyDevice(Device): \"\"\" Represents a YubiKey device", "primary key. \"\"\" return modhex(pack('>I', self.id)) public_id.short_description = 'Public Identity'", "token.encode('utf-8') try: public_id, otp = decode_otp(token, self.bin_key) except Exception: return", "attribute:: counter *PositiveIntegerField*: The volatile session usage counter most recently", "ID (hex-encoded).\" ) key = models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text=\"The", "of the default URLs. Because :mod:`urllib2` does not verify certificates,", "param_sl = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"SL\", help_text=\"The level of", "public_id(self): \"\"\" The public ID of this device is the", "return modhex(pack('>I', self.id)) public_id.short_description = 'Public Identity' public_id.admin_order_field = 'id'", "= \"Remote YubiKey device\" def verify_token(self, token): verified = False", "base URL of the verification service. Defaults to Yubico's hosted", "API_VERSIONS)), default='2.0', help_text=\"The version of the validation api to use.\"", "public ID of this device is the four-byte, big-endian, modhex-encoded", "models.URLField( blank=True, default='', verbose_name=\"Base URL\", help_text=\"The base URL of the", "self.session) and (otp.counter <= self.counter): return False # All tests", "use: '1.0', '1.1', or '2.0'. (Default: '2.0') .. attribute:: use_ssl", "from django.db import models from django.utils.encoding import force_text from django_otp.models", "'1.0', '1.1', or '2.0'. (Default: '2.0') .. attribute:: use_ssl *BooleanField*:", "help_text=\"The non-volatile session counter most recently used by this device.\"", "instances to point at any other service implementing the same", "verification service. Defaults to Yubico's hosted API.\" ) api_version =", "the counters and return the good news. 
self.session = otp.session", "help_text=\"The time to allow for syncing.\" ) class Meta(object): verbose_name", "the database. .. attribute:: service *ForeignKey*: The validation service to", "use_ssl *BooleanField*: If ``True``, we'll use the HTTPS versions of", "counter *PositiveIntegerField*: The volatile session usage counter most recently used", "False, self.param_sl or None, self.param_timeout or None) elif self.api_version ==", ".. attribute:: key *CharField*: The 16-byte AES key shared with", "service, which you can customize. You can also create instances", "big-endian, modhex-encoded primary key. \"\"\" return modhex(pack('>I', self.id)) public_id.short_description =", "can customize. You can also create instances to point at", "return the good news. self.session = otp.session self.counter = otp.counter", "max_length=64, blank=True, default='', verbose_name=\"API key\", help_text=\"Your base64-encoded API key.\" )", "models.CharField( max_length=32, help_text=\"The name of this validation service.\" ) api_id", "\"\"\" return modhex(pack('>I', self.id)) public_id.short_description = 'Public Identity' public_id.admin_order_field =", "def verify_token(self, token): if isinstance(token, six.text_type): token = token.encode('utf-8') try:", "attribute:: api_version *CharField*: The version of the validation API to", "help_text=\"The level of syncing required.\" ) param_timeout = models.CharField( max_length=16,", "from django_otp.util import hex_validator, random_hex from yubiotp.client import YubiClient10, YubiClient11,", "default_key(): return force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device): \"\"\"", "(hex-encoded). .. attribute:: key *CharField*: The 16-byte AES key shared", "provides little benefit. (Default: ``False``). .. attribute:: param_sl *CharField*: The", "service. .. attribute:: api_id *IntegerField*: Your API ID. The server", "``''``) .. attribute:: base_url *URLField*: The base URL of the", "= \"YubiKey validation service\" def __unicode__(self): return self.name def get_client(self):", "self.param_timeout or None) elif self.api_version == '1.1': client = YubiClient11(self.api_id,", ") param_timeout = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The time", "== '2.0': client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or", "from struct import pack import six from django.db import models", "verify certificates, this provides little benefit. (Default: ``False``). .. attribute::", "validation service to use for this device. .. attribute:: public_id", "most recently used by this device. .. attribute:: counter *PositiveIntegerField*:", "*IntegerField*: Your API ID. The server needs this to sign", "token): verified = False if token[:-32] == self.public_id: client =", "self.private_id.encode(): return False if otp.session < self.session: return False if", "with this YubiKey (hex-encoded).\" ) session = models.PositiveIntegerField( default=0, help_text=\"The", "sign requests. This is optional but strongly recommended. (Default: ``''``)", ":class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*: The time to allow for", "key\", help_text=\"Your base64-encoded API key.\" ) base_url = models.URLField( blank=True,", "in the database. .. 
attribute:: service *ForeignKey*: The validation service", "def id_validator(value): return hex_validator(6)(value) def default_key(): return force_text(random_hex(16)) def key_validator(value):", "(Default: 1) .. attribute:: api_key *CharField*: Your base64-encoded API key,", "*CharField*: Your base64-encoded API key, used to sign requests. This", "models from django.utils.encoding import force_text from django_otp.models import Device from", "device. \"\"\" private_id = models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private ID\",", "session usage counter most recently used by this device. \"\"\"", "service implementing the same protocol. .. attribute:: name *CharField*: The", "Identity' public_id.admin_order_field = 'id' @property def bin_key(self): return unhexlify(self.key.encode()) def", "service. Defaults to Yubico's hosted API.\" ) api_version = models.CharField(", "validation API to use: '1.0', '1.1', or '2.0'. (Default: '2.0')", "recently used by this device.\" ) counter = models.PositiveIntegerField( default=0,", "but strongly recommended. (Default: ``''``) .. attribute:: base_url *URLField*: The", "server needs this to sign responsees. (Default: 1) .. attribute::", "for syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS = ['1.0', '1.1', '2.0']", "The non-volatile session counter most recently used by this device.", "models.PositiveIntegerField( default=0, help_text=\"The non-volatile session counter most recently used by", "recently used by this device. .. attribute:: counter *PositiveIntegerField*: The", "modhex-encoded primary key. \"\"\" return modhex(pack('>I', self.id)) public_id.short_description = 'Public", "implementing the same protocol. .. attribute:: name *CharField*: The name", "URLs. Because :mod:`urllib2` does not verify certificates, this provides little", "counter most recently used by this device.\" ) class Meta(Device.Meta):", "import force_text from django_otp.models import Device from django_otp.util import hex_validator,", "to Yubico's official hosted service, which you can customize. You", "with this YubiKey (hex-encoded). .. attribute:: session *PositiveIntegerField*: The non-volatile", ") api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The version", "self.save() return True class ValidationService(models.Model): \"\"\" Represents a YubiKey validation", "The level of syncing required. See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout", "of syncing required.\" ) param_timeout = models.CharField( max_length=16, blank=True, default=None,", "service. Defaults to Yubico's hosted API. .. attribute:: api_version *CharField*:", "this YubiKey (hex-encoded). .. 
attribute:: session *PositiveIntegerField*: The non-volatile session", "= YubiClient11(self.api_id, api_key, self.use_ssl) else: client = YubiClient10(self.api_id, api_key, self.use_ssl)", "api_key = models.CharField( max_length=64, blank=True, default='', verbose_name=\"API key\", help_text=\"Your base64-encoded", "import hexlify, unhexlify from struct import pack import six from", "Exception: return False if public_id != self.public_id(): return False if", "ID\", help_text=\"The public identity of the YubiKey (modhex-encoded).\") class Meta(Device.Meta):", "by this device.\" ) counter = models.PositiveIntegerField( default=0, help_text=\"The volatile", "absolute_import, division, print_function, unicode_literals from base64 import b64decode from binascii", "def public_id(self): \"\"\" The public ID of this device is", "verbose_name = \"YubiKey validation service\" def __unicode__(self): return self.name def", "YubikeyDevice(Device): \"\"\" Represents a locally-verified YubiKey OTP :class:`~django_otp.models.Device`. .. attribute::", "api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or None) elif", "client class RemoteYubikeyDevice(Device): \"\"\" Represents a YubiKey device that is", "used by this device.\" ) counter = models.PositiveIntegerField( default=0, help_text=\"The", ".. attribute:: private_id *CharField*: The 6-byte private ID (hex-encoded). ..", "= models.CharField( max_length=16, blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The time to allow", "API ID. The server needs this to sign responsees. (Default:", "= \"Local YubiKey device\" def public_id(self): \"\"\" The public ID", "good news. self.session = otp.session self.counter = otp.counter self.save() return", "attribute:: param_sl *CharField*: The level of syncing required. See :class:`~yubiotp.client.YubiClient20`.", "except Exception: return False if public_id != self.public_id(): return False", "device\" def verify_token(self, token): verified = False if token[:-32] ==", "counter most recently used by this device.\" ) counter =", "client = self.service.get_client() response = client.verify(token) verified = response.is_ok() return", "key, used to sign requests. This is optional but strongly", "hex_validator, random_hex from yubiotp.client import YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex", "= YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or", "\"Remote YubiKey device\" def verify_token(self, token): verified = False if", "= models.CharField( max_length=32, help_text=\"The name of this validation service.\" )", ".. attribute:: session *PositiveIntegerField*: The non-volatile session counter most recently", "or None) elif self.api_version == '1.1': client = YubiClient11(self.api_id, api_key,", "modhex(pack('>I', self.id)) public_id.short_description = 'Public Identity' public_id.admin_order_field = 'id' @property", "def default_key(): return force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device):", "blank=True, default='', verbose_name=\"API key\", help_text=\"Your base64-encoded API key.\" ) base_url", "default='2.0', help_text=\"The version of the validation api to use.\" )", "to allow for syncing.\" ) class Meta(object): verbose_name = \"YubiKey", ") class Meta(object): verbose_name = \"YubiKey validation service\" def __unicode__(self):", "at least one :class:`~otp_yubikey.models.ValidationService` in the database. .. 
attribute:: service", "device.\" ) class Meta(Device.Meta): verbose_name = \"Local YubiKey device\" def", "models.CharField( max_length=16, blank=True, default=None, verbose_name=\"SL\", help_text=\"The level of syncing required.\"", "django_otp.util import hex_validator, random_hex from yubiotp.client import YubiClient10, YubiClient11, YubiClient20", "OTP :class:`~django_otp.models.Device`. .. attribute:: private_id *CharField*: The 6-byte private ID", "most recently used by this device. \"\"\" private_id = models.CharField(", "*CharField*: The version of the validation API to use: '1.0',", "YubiClient11, YubiClient20 from yubiotp.modhex import modhex from yubiotp.otp import decode_otp", "models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private ID\", help_text=\"The 6-byte private ID", "syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS = ['1.0', '1.1', '2.0'] name", "a YubiKey validation web service. By default, this will point", "= models.PositiveIntegerField( default=0, help_text=\"The volatile session usage counter most recently", "verbose_name=\"Use SSL\", help_text=\"Use HTTPS API URLs by default?\" ) param_sl", "force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value) def default_key(): return force_text(random_hex(16)) def", "point at any other service implementing the same protocol. ..", "key = models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text=\"The 16-byte AES key", "for syncing.\" ) class Meta(object): verbose_name = \"YubiKey validation service\"", "of the validation API to use: '1.0', '1.1', or '2.0'.", "*URLField*: The base URL of the verification service. Defaults to", "devices, you must have at least one :class:`~otp_yubikey.models.ValidationService` in the", "benefit. (Default: ``False``). .. attribute:: param_sl *CharField*: The level of", "['1.0', '1.1', '2.0'] name = models.CharField( max_length=32, help_text=\"The name of", "validators=[id_validator], default=default_id, verbose_name=\"Private ID\", help_text=\"The 6-byte private ID (hex-encoded).\" )", "default=None, verbose_name=\"Timeout\", help_text=\"The time to allow for syncing.\" ) class", "to sign responsees. (Default: 1) .. attribute:: api_key *CharField*: Your", "= decode_otp(token, self.bin_key) except Exception: return False if public_id !=", "name *CharField*: The name of this validation service. .. attribute::", "return hex_validator(16)(value) class YubikeyDevice(Device): \"\"\" Represents a locally-verified YubiKey OTP", "attribute:: use_ssl *BooleanField*: If ``True``, we'll use the HTTPS versions", "by this device.\" ) class Meta(Device.Meta): verbose_name = \"Local YubiKey", "return False if (otp.session == self.session) and (otp.counter <= self.counter):", "return True class ValidationService(models.Model): \"\"\" Represents a YubiKey validation web", "Device from django_otp.util import hex_validator, random_hex from yubiotp.client import YubiClient10,", "= models.CharField( max_length=16, blank=True, default=None, verbose_name=\"SL\", help_text=\"The level of syncing", "base_url = models.URLField( blank=True, default='', verbose_name=\"Base URL\", help_text=\"The base URL", "validators=[key_validator], default=default_key, help_text=\"The 16-byte AES key shared with this YubiKey", "YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex import modhex from yubiotp.otp import", "of this validation service. .. 
attribute:: api_id *IntegerField*: Your API", "= ['1.0', '1.1', '2.0'] name = models.CharField( max_length=32, help_text=\"The name", "name of this validation service.\" ) api_id = models.IntegerField( default=1,", "URLs by default?\" ) param_sl = models.CharField( max_length=16, blank=True, default=None,", "'2.0': client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None,", "= models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text=\"The 16-byte AES key shared", "will point to Yubico's official hosted service, which you can", "= 'id' @property def bin_key(self): return unhexlify(self.key.encode()) def verify_token(self, token):", "= token.encode('utf-8') try: public_id, otp = decode_otp(token, self.bin_key) except Exception:", ") use_ssl = models.BooleanField( default=False, verbose_name=\"Use SSL\", help_text=\"Use HTTPS API", "protocol. .. attribute:: name *CharField*: The name of this validation", "(otp.session == self.session) and (otp.counter <= self.counter): return False #", "django.utils.encoding import force_text from django_otp.models import Device from django_otp.util import", "api_key = b64decode(self.api_key.encode()) or None if self.api_version == '2.0': client", "the HTTPS versions of the default URLs. Because :mod:`urllib2` does", "\"\"\" service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name=\"Public ID\",", "Because :mod:`urllib2` does not verify certificates, this provides little benefit.", "You can also create instances to point at any other", "counter = models.PositiveIntegerField( default=0, help_text=\"The volatile session usage counter most", "default='', verbose_name=\"API key\", help_text=\"Your base64-encoded API key.\" ) base_url =", "max_length=32, validators=[key_validator], default=default_key, help_text=\"The 16-byte AES key shared with this", "help_text=\"The 16-byte AES key shared with this YubiKey (hex-encoded).\" )", "= models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The version of the", "See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*: The time to allow", "class RemoteYubikeyDevice(Device): \"\"\" Represents a YubiKey device that is to", "time to allow for syncing.\" ) class Meta(object): verbose_name =", "used by this device. \"\"\" private_id = models.CharField( max_length=12, validators=[id_validator],", "the default URLs. Because :mod:`urllib2` does not verify certificates, this", "division, print_function, unicode_literals from base64 import b64decode from binascii import", "service to use for this device. .. attribute:: public_id *CharField*:", "this device. .. attribute:: public_id *CharField*: The public identity of", "this YubiKey (hex-encoded).\" ) session = models.PositiveIntegerField( default=0, help_text=\"The non-volatile", "choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The version of the validation api to", "yubiotp.client import YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex import modhex from", ") param_sl = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"SL\", help_text=\"The level", "if public_id != self.public_id(): return False if hexlify(otp.uid) != self.private_id.encode():", "which you can customize. 
You can also create instances to", "default=0, help_text=\"The volatile session usage counter most recently used by", "hosted API.\" ) api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0',", "public_id, otp = decode_otp(token, self.bin_key) except Exception: return False if", ") session = models.PositiveIntegerField( default=0, help_text=\"The non-volatile session counter most", "required.\" ) param_timeout = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The", "key. \"\"\" return modhex(pack('>I', self.id)) public_id.short_description = 'Public Identity' public_id.admin_order_field", "six.text_type): token = token.encode('utf-8') try: public_id, otp = decode_otp(token, self.bin_key)", "self.base_url: client.base_url = self.base_url return client class RemoteYubikeyDevice(Device): \"\"\" Represents", "public_id *CharField*: The public identity of the YubiKey (modhex-encoded). \"\"\"", "modhex from yubiotp.otp import decode_otp def default_id(): return force_text(random_hex(6)) def", "requests. This is optional but strongly recommended. (Default: ``''``) ..", "unhexlify from struct import pack import six from django.db import", "public_id.short_description = 'Public Identity' public_id.admin_order_field = 'id' @property def bin_key(self):", "sign responsees. (Default: 1) .. attribute:: api_key *CharField*: Your base64-encoded", "b64decode(self.api_key.encode()) or None if self.api_version == '2.0': client = YubiClient20(self.api_id,", "can also create instances to point at any other service", "Meta(Device.Meta): verbose_name = \"Local YubiKey device\" def public_id(self): \"\"\" The", "public identity of the YubiKey (modhex-encoded). \"\"\" service = models.ForeignKey(ValidationService,", "verbose_name = \"Local YubiKey device\" def public_id(self): \"\"\" The public", "The public identity of the YubiKey (modhex-encoded). \"\"\" service =", "ID (hex-encoded). .. attribute:: key *CharField*: The 16-byte AES key", "blank=True, default='', verbose_name=\"Base URL\", help_text=\"The base URL of the verification", "and return the good news. self.session = otp.session self.counter =", "By default, this will point to Yubico's official hosted service,", "YubiClient11(self.api_id, api_key, self.use_ssl) else: client = YubiClient10(self.api_id, api_key, self.use_ssl) if", "def verify_token(self, token): verified = False if token[:-32] == self.public_id:", "random_hex from yubiotp.client import YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex import", "use.\" ) use_ssl = models.BooleanField( default=False, verbose_name=\"Use SSL\", help_text=\"Use HTTPS", "\"YubiKey validation service\" def __unicode__(self): return self.name def get_client(self): api_key", "(Default: ``''``) .. attribute:: base_url *URLField*: The base URL of", "recently used by this device. \"\"\" private_id = models.CharField( max_length=12,", "attribute:: api_key *CharField*: Your base64-encoded API key, used to sign", "this device.\" ) class Meta(Device.Meta): verbose_name = \"Local YubiKey device\"", "The base URL of the verification service. Defaults to Yubico's", "this device is the four-byte, big-endian, modhex-encoded primary key. \"\"\"", "YubiClient20 from yubiotp.modhex import modhex from yubiotp.otp import decode_otp def", "is the four-byte, big-endian, modhex-encoded primary key. 
\"\"\" return modhex(pack('>I',", "hexlify(otp.uid) != self.private_id.encode(): return False if otp.session < self.session: return", "syncing required. See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*: The time", "public_id = models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The public identity of the", "return False if public_id != self.public_id(): return False if hexlify(otp.uid)", "this device.\" ) counter = models.PositiveIntegerField( default=0, help_text=\"The volatile session", "default URLs. Because :mod:`urllib2` does not verify certificates, this provides", "this validation service.\" ) api_id = models.IntegerField( default=1, verbose_name=\"API ID\",", "(modhex-encoded). \"\"\" service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name=\"Public", "pass. Update the counters and return the good news. self.session", "# All tests pass. Update the counters and return the", "to use.\" ) use_ssl = models.BooleanField( default=False, verbose_name=\"Use SSL\", help_text=\"Use", "hexlify, unhexlify from struct import pack import six from django.db", "def get_client(self): api_key = b64decode(self.api_key.encode()) or None if self.api_version ==", "validation api to use.\" ) use_ssl = models.BooleanField( default=False, verbose_name=\"Use", "class ValidationService(models.Model): \"\"\" Represents a YubiKey validation web service. By", "= self.service.get_client() response = client.verify(token) verified = response.is_ok() return verified", "any other service implementing the same protocol. .. attribute:: name", "models.IntegerField( default=1, verbose_name=\"API ID\", help_text=\"Your API ID.\" ) api_key =", "(Default: ``False``). .. attribute:: param_sl *CharField*: The level of syncing", "to be verified with a remote validation service. In order", "*CharField*: The 6-byte private ID (hex-encoded). .. attribute:: key *CharField*:", "max_length=16, blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The time to allow for syncing.\"", "Represents a YubiKey device that is to be verified with", "the same protocol. .. attribute:: name *CharField*: The name of", "param_timeout = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The time to", "to allow for syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS = ['1.0',", "import Device from django_otp.util import hex_validator, random_hex from yubiotp.client import", ".. attribute:: public_id *CharField*: The public identity of the YubiKey", "base64-encoded API key.\" ) base_url = models.URLField( blank=True, default='', verbose_name=\"Base", "ID\", help_text=\"Your API ID.\" ) api_key = models.CharField( max_length=64, blank=True,", "help_text=\"The version of the validation api to use.\" ) use_ssl", "self.session: return False if (otp.session == self.session) and (otp.counter <=", "identity of the YubiKey (modhex-encoded).\") class Meta(Device.Meta): verbose_name = \"Remote", "validation service. .. attribute:: api_id *IntegerField*: Your API ID. The", "YubiKey validation web service. By default, this will point to", "models.BooleanField( default=False, verbose_name=\"Use SSL\", help_text=\"Use HTTPS API URLs by default?\"", "base64 import b64decode from binascii import hexlify, unhexlify from struct", "session = models.PositiveIntegerField( default=0, help_text=\"The non-volatile session counter most recently", "(Default: '2.0') .. 
attribute:: use_ssl *BooleanField*: If ``True``, we'll use", "default_id(): return force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value) def default_key(): return", "verbose_name=\"Public ID\", help_text=\"The public identity of the YubiKey (modhex-encoded).\") class", "Yubico's hosted API. .. attribute:: api_version *CharField*: The version of", "= models.CharField( max_length=64, blank=True, default='', verbose_name=\"API key\", help_text=\"Your base64-encoded API", "or None if self.api_version == '2.0': client = YubiClient20(self.api_id, api_key,", "allow for syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS = ['1.0', '1.1',", "ValidationService(models.Model): \"\"\" Represents a YubiKey validation web service. By default,", "True class ValidationService(models.Model): \"\"\" Represents a YubiKey validation web service.", "= YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url: client.base_url = self.base_url return", "struct import pack import six from django.db import models from", "yubiotp.modhex import modhex from yubiotp.otp import decode_otp def default_id(): return", "*CharField*: The time to allow for syncing. See :class:`~yubiotp.client.YubiClient20`. \"\"\"", "\"\"\" Represents a locally-verified YubiKey OTP :class:`~django_otp.models.Device`. .. attribute:: private_id", "pack import six from django.db import models from django.utils.encoding import", "def default_id(): return force_text(random_hex(6)) def id_validator(value): return hex_validator(6)(value) def default_key():", "client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout", "validation service\" def __unicode__(self): return self.name def get_client(self): api_key =", ".. attribute:: api_id *IntegerField*: Your API ID. The server needs", "most recently used by this device.\" ) class Meta(Device.Meta): verbose_name", "key.\" ) base_url = models.URLField( blank=True, default='', verbose_name=\"Base URL\", help_text=\"The", "if (otp.session == self.session) and (otp.counter <= self.counter): return False", "default=default_id, verbose_name=\"Private ID\", help_text=\"The 6-byte private ID (hex-encoded).\" ) key", "the validation API to use: '1.0', '1.1', or '2.0'. (Default:", "if hexlify(otp.uid) != self.private_id.encode(): return False if otp.session < self.session:", "import YubiClient10, YubiClient11, YubiClient20 from yubiotp.modhex import modhex from yubiotp.otp", "by default?\" ) param_sl = models.CharField( max_length=16, blank=True, default=None, verbose_name=\"SL\",", "base_url *URLField*: The base URL of the verification service. Defaults", ".. attribute:: param_timeout *CharField*: The time to allow for syncing.", "key *CharField*: The 16-byte AES key shared with this YubiKey", "return force_text(random_hex(16)) def key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device): \"\"\" Represents", "class YubikeyDevice(Device): \"\"\" Represents a locally-verified YubiKey OTP :class:`~django_otp.models.Device`. ..", "!= self.private_id.encode(): return False if otp.session < self.session: return False", "self.name def get_client(self): api_key = b64decode(self.api_key.encode()) or None if self.api_version", "self.use_ssl) if self.base_url: client.base_url = self.base_url return client class RemoteYubikeyDevice(Device):", "URL\", help_text=\"The base URL of the verification service. 
Defaults to", "allow for syncing.\" ) class Meta(object): verbose_name = \"YubiKey validation", "The public ID of this device is the four-byte, big-endian,", "AES key shared with this YubiKey (hex-encoded).\" ) session =", "max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private ID\", help_text=\"The 6-byte private ID (hex-encoded).\"", "print_function, unicode_literals from base64 import b64decode from binascii import hexlify,", "else: client = YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url: client.base_url =", "= models.PositiveIntegerField( default=0, help_text=\"The non-volatile session counter most recently used", "to point at any other service implementing the same protocol.", "YubiKey (hex-encoded). .. attribute:: session *PositiveIntegerField*: The non-volatile session counter", "of the validation api to use.\" ) use_ssl = models.BooleanField(", "help_text=\"The name of this validation service.\" ) api_id = models.IntegerField(", "API to use: '1.0', '1.1', or '2.0'. (Default: '2.0') ..", "version of the validation api to use.\" ) use_ssl =", "is optional but strongly recommended. (Default: ``''``) .. attribute:: base_url", "verify_token(self, token): verified = False if token[:-32] == self.public_id: client", "'id' @property def bin_key(self): return unhexlify(self.key.encode()) def verify_token(self, token): if", "the four-byte, big-endian, modhex-encoded primary key. \"\"\" return modhex(pack('>I', self.id))", "does not verify certificates, this provides little benefit. (Default: ``False``).", "validation service. In order create these devices, you must have", "four-byte, big-endian, modhex-encoded primary key. \"\"\" return modhex(pack('>I', self.id)) public_id.short_description", "b64decode from binascii import hexlify, unhexlify from struct import pack", "*PositiveIntegerField*: The non-volatile session counter most recently used by this", ".. attribute:: base_url *URLField*: The base URL of the verification", "of this device is the four-byte, big-endian, modhex-encoded primary key.", ") base_url = models.URLField( blank=True, default='', verbose_name=\"Base URL\", help_text=\"The base", "also create instances to point at any other service implementing", "order create these devices, you must have at least one", "session counter most recently used by this device. .. attribute::", "for this device. .. attribute:: public_id *CharField*: The public identity", "or '2.0'. (Default: '2.0') .. attribute:: use_ssl *BooleanField*: If ``True``,", "session counter most recently used by this device.\" ) counter", "verbose_name=\"Base URL\", help_text=\"The base URL of the verification service. Defaults", "this validation service. .. attribute:: api_id *IntegerField*: Your API ID.", "these devices, you must have at least one :class:`~otp_yubikey.models.ValidationService` in", "help_text=\"The base URL of the verification service. Defaults to Yubico's", "param_sl *CharField*: The level of syncing required. See :class:`~yubiotp.client.YubiClient20`. 
..", "return self.name def get_client(self): api_key = b64decode(self.api_key.encode()) or None if", "YubiKey device\" def verify_token(self, token): verified = False if token[:-32]", "'2.0'] name = models.CharField( max_length=32, help_text=\"The name of this validation", "self.api_version == '2.0': client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl", "default=1, verbose_name=\"API ID\", help_text=\"Your API ID.\" ) api_key = models.CharField(", "api to use.\" ) use_ssl = models.BooleanField( default=False, verbose_name=\"Use SSL\",", "we'll use the HTTPS versions of the default URLs. Because", "return unhexlify(self.key.encode()) def verify_token(self, token): if isinstance(token, six.text_type): token =", "required. See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*: The time to", "\"\"\" private_id = models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private ID\", help_text=\"The", "None if self.api_version == '2.0': client = YubiClient20(self.api_id, api_key, self.use_ssl,", "if token[:-32] == self.public_id: client = self.service.get_client() response = client.verify(token)", "API URLs by default?\" ) param_sl = models.CharField( max_length=16, blank=True,", "default=0, help_text=\"The non-volatile session counter most recently used by this", "models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The version of the validation", "self.use_ssl) else: client = YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url: client.base_url", "'Public Identity' public_id.admin_order_field = 'id' @property def bin_key(self): return unhexlify(self.key.encode())", "to sign requests. This is optional but strongly recommended. (Default:", ":class:`~yubiotp.client.YubiClient20`. \"\"\" API_VERSIONS = ['1.0', '1.1', '2.0'] name = models.CharField(", "ID.\" ) api_key = models.CharField( max_length=64, blank=True, default='', verbose_name=\"API key\",", "max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The version of the validation api", "Defaults to Yubico's hosted API.\" ) api_version = models.CharField( max_length=8,", "6-byte private ID (hex-encoded).\" ) key = models.CharField( max_length=32, validators=[key_validator],", "isinstance(token, six.text_type): token = token.encode('utf-8') try: public_id, otp = decode_otp(token,", "def __unicode__(self): return self.name def get_client(self): api_key = b64decode(self.api_key.encode()) or", "False if (otp.session == self.session) and (otp.counter <= self.counter): return", "name of this validation service. .. attribute:: api_id *IntegerField*: Your", "client.base_url = self.base_url return client class RemoteYubikeyDevice(Device): \"\"\" Represents a", "from django.utils.encoding import force_text from django_otp.models import Device from django_otp.util", "private_id = models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private ID\", help_text=\"The 6-byte", "recently used by this device.\" ) class Meta(Device.Meta): verbose_name =", "False if token[:-32] == self.public_id: client = self.service.get_client() response =", "API key.\" ) base_url = models.URLField( blank=True, default='', verbose_name=\"Base URL\",", "client = YubiClient10(self.api_id, api_key, self.use_ssl) if self.base_url: client.base_url = self.base_url", "HTTPS versions of the default URLs. 
Because :mod:`urllib2` does not", "unhexlify(self.key.encode()) def verify_token(self, token): if isinstance(token, six.text_type): token = token.encode('utf-8')", "one :class:`~otp_yubikey.models.ValidationService` in the database. .. attribute:: service *ForeignKey*: The", "API.\" ) api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)), default='2.0', help_text=\"The", "Yubico's hosted API.\" ) api_version = models.CharField( max_length=8, choices=list(zip(API_VERSIONS, API_VERSIONS)),", ".. attribute:: service *ForeignKey*: The validation service to use for", "key shared with this YubiKey (hex-encoded). .. attribute:: session *PositiveIntegerField*:", "attribute:: name *CharField*: The name of this validation service. ..", "``True``, we'll use the HTTPS versions of the default URLs.", "*BooleanField*: If ``True``, we'll use the HTTPS versions of the", "by this device. .. attribute:: counter *PositiveIntegerField*: The volatile session", "default, this will point to Yubico's official hosted service, which", "validation service.\" ) api_id = models.IntegerField( default=1, verbose_name=\"API ID\", help_text=\"Your", "verbose_name=\"Timeout\", help_text=\"The time to allow for syncing.\" ) class Meta(object):", "help_text=\"The 6-byte private ID (hex-encoded).\" ) key = models.CharField( max_length=32,", "tests pass. Update the counters and return the good news.", "\"\"\" Represents a YubiKey validation web service. By default, this", "django.db import models from django.utils.encoding import force_text from django_otp.models import", "'1.1', '2.0'] name = models.CharField( max_length=32, help_text=\"The name of this", "api_id *IntegerField*: Your API ID. The server needs this to", "*ForeignKey*: The validation service to use for this device. ..", "public_id != self.public_id(): return False if hexlify(otp.uid) != self.private_id.encode(): return", "syncing.\" ) class Meta(object): verbose_name = \"YubiKey validation service\" def", "name = models.CharField( max_length=32, help_text=\"The name of this validation service.\"", "= 'Public Identity' public_id.admin_order_field = 'id' @property def bin_key(self): return", "ID\", help_text=\"The 6-byte private ID (hex-encoded).\" ) key = models.CharField(", "shared with this YubiKey (hex-encoded). .. attribute:: session *PositiveIntegerField*: The", "ID. The server needs this to sign responsees. (Default: 1)", "this device. .. attribute:: counter *PositiveIntegerField*: The volatile session usage", "blank=True, default=None, verbose_name=\"Timeout\", help_text=\"The time to allow for syncing.\" )", "used by this device. .. attribute:: counter *PositiveIntegerField*: The volatile", "False # All tests pass. Update the counters and return", "Your base64-encoded API key, used to sign requests. This is", "default=None, verbose_name=\"SL\", help_text=\"The level of syncing required.\" ) param_timeout =", "the good news. self.session = otp.session self.counter = otp.counter self.save()", "class Meta(object): verbose_name = \"YubiKey validation service\" def __unicode__(self): return", "id_validator(value): return hex_validator(6)(value) def default_key(): return force_text(random_hex(16)) def key_validator(value): return", "public_id.admin_order_field = 'id' @property def bin_key(self): return unhexlify(self.key.encode()) def verify_token(self,", "least one :class:`~otp_yubikey.models.ValidationService` in the database. .. 
attribute:: service *ForeignKey*:", "attribute:: param_timeout *CharField*: The time to allow for syncing. See", "= models.CharField( max_length=12, validators=[id_validator], default=default_id, verbose_name=\"Private ID\", help_text=\"The 6-byte private", "All tests pass. Update the counters and return the good", "hex_validator(16)(value) class YubikeyDevice(Device): \"\"\" Represents a locally-verified YubiKey OTP :class:`~django_otp.models.Device`.", "False if public_id != self.public_id(): return False if hexlify(otp.uid) !=", "of syncing required. See :class:`~yubiotp.client.YubiClient20`. .. attribute:: param_timeout *CharField*: The", "'1.1', or '2.0'. (Default: '2.0') .. attribute:: use_ssl *BooleanField*: If", "the verification service. Defaults to Yubico's hosted API. .. attribute::", "yubiotp.otp import decode_otp def default_id(): return force_text(random_hex(6)) def id_validator(value): return", "create instances to point at any other service implementing the", "and (otp.counter <= self.counter): return False # All tests pass.", "web service. By default, this will point to Yubico's official", "official hosted service, which you can customize. You can also", "device is the four-byte, big-endian, modhex-encoded primary key. \"\"\" return", "The 16-byte AES key shared with this YubiKey (hex-encoded). ..", "verbose_name = \"Remote YubiKey device\" def verify_token(self, token): verified =", "token): if isinstance(token, six.text_type): token = token.encode('utf-8') try: public_id, otp", "of the YubiKey (modhex-encoded). \"\"\" service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id", "the YubiKey (modhex-encoded). \"\"\" service = models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id =", "(hex-encoded).\" ) session = models.PositiveIntegerField( default=0, help_text=\"The non-volatile session counter", "otp = decode_otp(token, self.bin_key) except Exception: return False if public_id", "private ID (hex-encoded).\" ) key = models.CharField( max_length=32, validators=[key_validator], default=default_key,", "max_length=16, blank=True, default=None, verbose_name=\"SL\", help_text=\"The level of syncing required.\" )", "help_text=\"The volatile session usage counter most recently used by this", "self.public_id(): return False if hexlify(otp.uid) != self.private_id.encode(): return False if", "device that is to be verified with a remote validation", "__future__ import absolute_import, division, print_function, unicode_literals from base64 import b64decode", "help_text=\"The public identity of the YubiKey (modhex-encoded).\") class Meta(Device.Meta): verbose_name", "key_validator(value): return hex_validator(16)(value) class YubikeyDevice(Device): \"\"\" Represents a locally-verified YubiKey", "from base64 import b64decode from binascii import hexlify, unhexlify from", "return client class RemoteYubikeyDevice(Device): \"\"\" Represents a YubiKey device that", "customize. You can also create instances to point at any", "blank=True, default=None, verbose_name=\"SL\", help_text=\"The level of syncing required.\" ) param_timeout", "AES key shared with this YubiKey (hex-encoded). .. attribute:: session", "other service implementing the same protocol. .. attribute:: name *CharField*:", "help_text=\"Use HTTPS API URLs by default?\" ) param_sl = models.CharField(", "service. 
By default, this will point to Yubico's official hosted", "verify_token(self, token): if isinstance(token, six.text_type): token = token.encode('utf-8') try: public_id,", "= otp.counter self.save() return True class ValidationService(models.Model): \"\"\" Represents a", "service.\" ) api_id = models.IntegerField( default=1, verbose_name=\"API ID\", help_text=\"Your API", "None) elif self.api_version == '1.1': client = YubiClient11(self.api_id, api_key, self.use_ssl)", "RemoteYubikeyDevice(Device): \"\"\" Represents a YubiKey device that is to be", "self.counter): return False # All tests pass. Update the counters", "unicode_literals from base64 import b64decode from binascii import hexlify, unhexlify", "from yubiotp.otp import decode_otp def default_id(): return force_text(random_hex(6)) def id_validator(value):", "api_id = models.IntegerField( default=1, verbose_name=\"API ID\", help_text=\"Your API ID.\" )", "= models.ForeignKey(ValidationService, on_delete=models.CASCADE) public_id = models.CharField(max_length=32, verbose_name=\"Public ID\", help_text=\"The public", "counter most recently used by this device. .. attribute:: counter", "this to sign responsees. (Default: 1) .. attribute:: api_key *CharField*:", ".. attribute:: api_key *CharField*: Your base64-encoded API key, used to", "from __future__ import absolute_import, division, print_function, unicode_literals from base64 import", ") class Meta(Device.Meta): verbose_name = \"Local YubiKey device\" def public_id(self):", "a YubiKey device that is to be verified with a", ") key = models.CharField( max_length=32, validators=[key_validator], default=default_key, help_text=\"The 16-byte AES", ") api_key = models.CharField( max_length=64, blank=True, default='', verbose_name=\"API key\", help_text=\"Your", "__unicode__(self): return self.name def get_client(self): api_key = b64decode(self.api_key.encode()) or None", "URL of the verification service. Defaults to Yubico's hosted API.\"", "(modhex-encoded).\") class Meta(Device.Meta): verbose_name = \"Remote YubiKey device\" def verify_token(self,", "import pack import six from django.db import models from django.utils.encoding", "If ``True``, we'll use the HTTPS versions of the default", "from yubiotp.modhex import modhex from yubiotp.otp import decode_otp def default_id():", "session usage counter most recently used by this device.\" )", "Update the counters and return the good news. self.session =", "to use: '1.0', '1.1', or '2.0'. (Default: '2.0') .. attribute::", "self.use_ssl, False, self.param_sl or None, self.param_timeout or None) elif self.api_version" ]
[ "object_name == \"event_chest_special_appear\": return HsvFilter(0, 124, 62, 88, 217, 246,", "217, 246, 0, 0, 0, 0) if object_name == \"inventory_green_item\":", "30, 197, 0, 0, 40, 38), [10, 145, 1084, 684]", "30, 34), [1100, 50, 1260, 210] if object_name == \"gate_map_pos\":", "custom data structure to hold the state of an HSV", "= hMax self.sMax = sMax self.vMax = vMax self.sAdd =", "73, 94, 106, 255, 255, 0, 0, 0, 0), [1083,", "135, 31, 240, 217, 0, 0, 0, 0), [460, 420,", "855, 710] if object_name == \"prompt_press_x_pickup\": return HsvFilter(78, 110, 110,", "object_name == \"map_outline\": if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8,", "0, 0, 0) if object_name == \"button_repair\": return None, [208,", "124, 62, 88, 217, 246, 0, 0, 0, 0) if", "the way as it's a chonk # For a given", "\"display_boss_name_and_healthbar\": return HsvFilter(0, 92, 123, 29, 255, 255, 0, 0,", "255, 0, 0, 0, 0), [10, 145, 1084, 684] if", "[] if object_name == \"button_choose_map\": return None, [] if object_name", "= vMax self.sAdd = sAdd self.sSub = sSub self.vAdd =", "if object_name == \"button_choose_map\": return None, [] if object_name ==", "86, 73, 255, 255, 0, 0, 0, 0), [485, 280,", "vMin=None, hMax=None, sMax=None, vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None): self.hMin =", "139, 91, 30, 197, 0, 0, 40, 38), [10, 145,", "if object_name == \"gate_map_pos\": # This is a very difficult", "0), [10, 145, 1084, 684] if object_name == \"loot_near\": return", "as it's a chonk # For a given item string", "\"enemy_map_loc\": #print(\"Using enemy location filter\") if kwargs.get(\"big_map\"): return HsvFilter(0, 128,", "None, [] if object_name == \"button_inv_equipment\": return None, [] if", "255, 0, 0, 0, 0) if object_name == \"event_chest_special_appear\": return", "600] # These are all To be done later if", "== \"event_card_trade\": return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "**kwargs) -> typing.Tuple[HsvFilter, list]: if object_name is None: #print(\"Using default", "if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255, 255, 0,", "return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0,", "def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]: if object_name is None:", "0, 139, 91, 30, 197, 0, 0, 40, 38), [10,", "0, 0) if object_name == \"inventory_yellow_item\": # This is a", "684] if object_name == \"loot_near\": return HsvFilter(0, 155, 135, 31,", "sAdd=None, sSub=None, vAdd=None, vSub=None): self.hMin = hMin self.sMin = sMin", "if object_name == \"button_open_store\": return None, [] if object_name ==", "37), [1100, 50, 1260, 210] if object_name == \"loot_distant\": return", "54, 24, 38) if object_name == \"prompt_select_card\": return HsvFilter(79, 149,", "684] if object_name == \"map_outline\": if kwargs.get(\"big_map\"): return HsvFilter(0, 128,", "65, 255, 0, 0, 0, 17), [464, 600, 855, 680]", "1084, 684] if object_name == \"map_outline\": if kwargs.get(\"big_map\"): return HsvFilter(0,", "return HsvFilter(37, 147, 0, 61, 255, 255, 0, 0, 0,", "return None, [208, 600] # These are all To be", "0), [10, 145, 1084, 684] return HsvFilter(0, 0, 0, 255,", "38, 28, 152, 124, 0, 0, 5, 12), [10, 145,", "sSub=None, vAdd=None, vSub=None): self.hMin = hMin self.sMin = sMin self.vMin", "of the way as it's a chonk # For a", "214, 179, 65, 255, 0, 0, 0, 17), [464, 600,", "\"enemy_arrow\": return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "optimal filter and the correct position to look def grab_object_preset(object_name=None,", "0, 0, 0, 0), [10, 145, 
1084, 684] # Buttons", "vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None): self.hMin = hMin self.sMin =", "state of an HSV filter class HsvFilter: def __init__(self, hMin=None,", "92, 105, 255, 225, 0, 54, 24, 38) if object_name", "typing.Tuple[HsvFilter, list]: if object_name is None: #print(\"Using default filter\") return", "1084, 684] return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "if object_name == \"player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(31, 94, 86,", "0, 70, 37), [1100, 50, 1260, 210] if object_name ==", "684] if object_name == \"message_boss_encounter\": return HsvFilter(0, 92, 128, 13,", "0, 0, 255, 255, 255, 0, 0, 0, 0) if", "way as it's a chonk # For a given item", "if object_name == \"button_explore_again\": return None, [] if object_name ==", "280, 900, 734] return HsvFilter(16, 172, 194, 32, 255, 255,", "280, 900, 734] return HsvFilter(31, 94, 86, 73, 255, 255,", "0, 11, 32, 21) if object_name == \"inventory_purple_item\": return HsvFilter(126,", "To be done later if object_name == \"event_card_trade\": return HsvFilter(0,", "0, 255, 255, 255, 0, 0, 0, 0) if object_name", "of an HSV filter class HsvFilter: def __init__(self, hMin=None, sMin=None,", "194, 32, 255, 255, 0, 0, 70, 37), [1100, 50,", "HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70, 37),", "data structure to hold the state of an HSV filter", "is None: #print(\"Using default filter\") return HsvFilter(0, 0, 0, 255,", "700] if object_name == \"loot_chest_normal\": # This is a difficult", "sMin self.vMin = vMin self.hMax = hMax self.sMax = sMax", "0) if object_name == \"inventory_yellow_item\": # This is a dangerous", "== \"loot_chest_special\": if kwargs.get(\"big_map\"): return HsvFilter(0, 0, 0, 255, 255,", "217, 0, 0, 0, 0), [460, 420, 855, 710] if", "11, 32, 21) if object_name == \"inventory_purple_item\": return HsvFilter(126, 153,", "return HsvFilter(0, 92, 123, 29, 255, 255, 0, 0, 0,", "return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0,", "return HsvFilter(0, 73, 94, 106, 255, 255, 0, 0, 0,", "distinguish against green items and vice versa return HsvFilter(19, 91,", "0) if object_name == \"event_chest_special_appear\": return HsvFilter(0, 124, 62, 88,", "1084, 684] if object_name == \"cards\": return HsvFilter(0, 0, 0,", "return HsvFilter(78, 110, 110, 97, 189, 255, 0, 0, 0,", "HsvFilter(126, 153, 0, 255, 255, 255, 0, 0, 0, 0)", "255, 0, 0, 0, 0), [485, 280, 900, 734] return", "[10, 145, 1084, 684] if object_name == \"message_boss_encounter\": return HsvFilter(0,", "# This is a very difficult one to separate if", "HsvFilter(72, 98, 92, 105, 255, 225, 0, 54, 24, 38)", "1260, 210] if object_name == \"player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(31,", "return HsvFilter(0, 34, 38, 28, 152, 124, 0, 0, 5,", "0) if object_name == \"inventory_green_item\": return HsvFilter(37, 147, 0, 61,", "[485, 280, 900, 734] return HsvFilter(31, 94, 86, 73, 255,", "\"button_open_store\": return None, [] if object_name == \"button_go_town\": return None,", "100] if object_name == \"enemy_arrow\": return HsvFilter(0, 0, 0, 255,", "32, 255, 255, 0, 0, 70, 37), [1100, 50, 1260,", "5, 12), [10, 145, 1084, 684] if object_name == \"map_outline\":", "0, 40, 38), [10, 145, 1084, 684] if object_name ==", "0, 0, 255, 255, 255, 0, 0, 0, 0), [10,", "0, 255, 255, 255, 0, 0, 0, 0), [735, 32,", "HsvFilter(14, 116, 33, 32, 210, 59, 16, 0, 3, 0),", "object_name == \"button_inv_consume\": return None, [] if object_name == \"button_inv_other\":", "kwargs.get(\"big_map\"): return HsvFilter(31, 94, 86, 73, 
255, 255, 0, 0,", "188, 0, 0, 0, 0) if object_name == \"inventory_yellow_item\": #", "None, [] if object_name == \"button_repair_confirm\": return None, [] if", "object_name == \"other_player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(16, 172, 194, 32,", "680] if object_name == \"message_go\": return HsvFilter(32, 114, 89, 58,", "object_name == \"event_card_trade\": return HsvFilter(0, 0, 0, 255, 255, 255,", "255, 255, 0, 0, 0, 0) if object_name == \"button_repair\":", "[1100, 50, 1260, 210] if object_name == \"loot_distant\": return HsvFilter(14,", "return HsvFilter(32, 114, 89, 58, 255, 255, 0, 12, 0,", "if object_name == \"enemy_nametag\": return HsvFilter(49, 0, 139, 91, 30,", "vice versa return HsvFilter(19, 91, 107, 31, 168, 181, 0,", "0, 0, 0, 0) if object_name == \"event_otherworld\": return HsvFilter(0,", "210] if object_name == \"prompt_move_reward_screen\": return HsvFilter(72, 98, 92, 105,", "# Putting this here out of the way as it's", "HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0)", "it can barely # distinguish against green items and vice", "97, 189, 255, 0, 0, 0, 0), [1080, 660, 1255,", "210] if object_name == \"other_player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(16, 172,", "0), [460, 420, 855, 710] if object_name == \"prompt_press_x_pickup\": return", "# This is a dangerous one as it can barely", "86, 73, 255, 255, 0, 0, 0, 0), [1100, 50,", "HsvFilter(79, 169, 0, 109, 246, 188, 0, 0, 0, 0)", "0, 0, 0, 0) if object_name == \"button_repair\": return None,", "145, 1084, 684] if object_name == \"cards\": return HsvFilter(0, 0,", "[10, 145, 1084, 684] # Buttons for clicking, known positions", "return None, [] if object_name == \"button_choose_map\": return None, []", "660, 1255, 725] if object_name == \"message_section_cleared\": return HsvFilter(0, 0,", "0, 0, 0), [3, 32, 1280, 794] if object_name ==", "[1100, 50, 1260, 210] if object_name == \"player_map_loc\": if kwargs.get(\"big_map\"):", "self.vAdd = vAdd self.vSub = vSub # Putting this here", "40, 38), [10, 145, 1084, 684] if object_name == \"message_boss_encounter\":", "HsvFilter(0, 92, 123, 29, 255, 255, 0, 0, 0, 20),", "0, 0, 0), [630, 520, 1120, 680] if object_name ==", "return HsvFilter(0, 124, 62, 88, 217, 246, 0, 0, 0,", "128, 82, 8, 255, 255, 0, 66, 30, 34), [485,", "object_name == \"message_boss_encounter\": return HsvFilter(0, 92, 128, 13, 255, 255,", "object_name == \"gate_map_pos\": # This is a very difficult one", "filter\") return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "== \"inventory_green_item\": return HsvFilter(37, 147, 0, 61, 255, 255, 0,", "self.vMax = vMax self.sAdd = sAdd self.sSub = sSub self.vAdd", "\"event_otherworld\": return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "if object_name is None: #print(\"Using default filter\") return HsvFilter(0, 0,", "255, 255, 255, 0, 0, 0, 0), [10, 145, 1084,", "[] if object_name == \"button_inv_other\": return None, [] if object_name", "0, 0, 0), [1083, 295, 1188, 368] if object_name ==", "[] if object_name == \"button_inv_equipment\": return None, [] if object_name", "20), [415, 533, 888, 700] if object_name == \"loot_chest_normal\": #", "[464, 600, 855, 680] if object_name == \"message_go\": return HsvFilter(32,", "0, 0), [600, 222, 700, 275] if object_name == \"enemy_nametag\":", "to look def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]: if object_name", "== \"button_repair\": return None, [208, 600] # These are all", "this here out of the way as it's a chonk", "0, 0, 0) if object_name == 
\"event_chest_special_appear\": return HsvFilter(0, 124,", "0, 0), [10, 145, 1084, 684] if object_name == \"cards\":", "is a difficult one to separate return HsvFilter(0, 34, 38,", "179, 65, 255, 0, 0, 0, 17), [464, 600, 855,", "if object_name == \"event_otherworld\": return HsvFilter(0, 0, 0, 255, 255,", "0, 66, 30, 34), [1100, 50, 1260, 210] if object_name", "0, 0, 255, 255, 255, 0, 0, 0, 0), [735,", "if object_name == \"button_repair_confirm\": return None, [] if object_name ==", "if object_name == \"prompt_select_card\": return HsvFilter(79, 149, 140, 255, 255,", "255, 255, 0, 0, 0, 0), [10, 145, 1084, 684]", "Putting this here out of the way as it's a", "if object_name == \"prompt_press_x_pickup\": return HsvFilter(78, 110, 110, 97, 189,", "class HsvFilter: def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None,", "typing # custom data structure to hold the state of", "vMax self.sAdd = sAdd self.sSub = sSub self.vAdd = vAdd", "\"button_inv_equipment\": return None, [] if object_name == \"button_inv_consume\": return None,", "== \"other_player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(16, 172, 194, 32, 255,", "0, 0, 0, 0) if object_name == \"event_chest_special_appear\": return HsvFilter(0,", "255, 0, 0, 0, 0) if object_name == \"button_repair\": return", "object_name == \"enemy_nametag\": return HsvFilter(49, 0, 139, 91, 30, 197,", "object_name == \"prompt_select_card\": return HsvFilter(79, 149, 140, 255, 255, 255,", "very difficult one to separate if kwargs.get(\"big_map\"): return HsvFilter(0, 128,", "[600, 222, 700, 275] if object_name == \"enemy_nametag\": return HsvFilter(49,", "\"cards\": return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "255, 0, 12, 0, 0), [600, 222, 700, 275] if", "32, 21) if object_name == \"inventory_purple_item\": return HsvFilter(126, 153, 0,", "0, 0, 0, 17), [464, 600, 855, 680] if object_name", "128, 13, 255, 255, 0, 0, 0, 0), [630, 520,", "255, 255, 0, 0, 0, 0), [3, 32, 1280, 794]", "all To be done later if object_name == \"event_card_trade\": return", "684] return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "0), [10, 145, 1084, 684] # Buttons for clicking, known", "31, 240, 217, 0, 0, 0, 0), [460, 420, 855,", "91, 30, 197, 0, 0, 40, 38), [10, 145, 1084,", "0, 0, 0, 0), [630, 520, 1120, 680] if object_name", "and vice versa return HsvFilter(19, 91, 107, 31, 168, 181,", "0, 0, 0, 0), [735, 32, 1085, 100] if object_name", "0), [1100, 50, 1260, 210] if object_name == \"other_player_map_loc\": if", "0, 0, 0), [485, 280, 900, 734] return HsvFilter(31, 94,", "object_name == \"cards\": return HsvFilter(0, 0, 0, 255, 255, 255,", "8, 255, 255, 0, 66, 30, 34), [1100, 50, 1260,", "sMin=None, vMin=None, hMax=None, sMax=None, vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None): self.hMin", "are all To be done later if object_name == \"event_card_trade\":", "116, 33, 32, 210, 59, 16, 0, 3, 0), [10,", "if object_name == \"event_chest_special_appear\": return HsvFilter(0, 124, 62, 88, 217,", "0, 0), [10, 145, 1084, 684] return HsvFilter(0, 0, 0,", "self.sAdd = sAdd self.sSub = sSub self.vAdd = vAdd self.vSub", "0, 0, 0), [735, 32, 1085, 100] if object_name ==", "255, 0, 66, 30, 34), [485, 280, 900, 734] return", "275] if object_name == \"enemy_nametag\": return HsvFilter(49, 0, 139, 91,", "0, 0, 0), [1080, 660, 1255, 725] if object_name ==", "green items and vice versa return HsvFilter(19, 91, 107, 31,", "1084, 684] if object_name == \"message_boss_encounter\": return HsvFilter(0, 92, 128,", "255, 255, 0, 0, 0, 0), [735, 32, 
1085, 100]", "255, 0, 0, 70, 37), [485, 280, 900, 734] return", "59, 16, 0, 3, 0), [10, 145, 1084, 684] if", "0, 0, 0, 0) if object_name == \"inventory_yellow_item\": # This", "object_name == \"button_repair\": return None, [208, 600] # These are", "if object_name == \"message_boss_encounter\": return HsvFilter(0, 92, 128, 13, 255,", "HsvFilter(32, 114, 89, 58, 255, 255, 0, 12, 0, 0),", "255, 0, 0, 70, 37), [1100, 50, 1260, 210] if", "if kwargs.get(\"big_map\"): return HsvFilter(16, 172, 194, 32, 255, 255, 0,", "\"enemy_nametag\": return HsvFilter(49, 0, 139, 91, 30, 197, 0, 0,", "object_name == \"player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(31, 94, 86, 73,", "\"button_repair\": return None, [208, 600] # These are all To", "[] if object_name == \"button_inv_consume\": return None, [] if object_name", "done later if object_name == \"event_card_trade\": return HsvFilter(0, 0, 0,", "if object_name == \"enemy_arrow\": return HsvFilter(0, 0, 0, 255, 255,", "0), [1083, 295, 1188, 368] if object_name == \"enemy_map_loc\": #print(\"Using", "255, 255, 255, 0, 0, 0, 0), [735, 32, 1085,", "684] # Buttons for clicking, known positions if object_name ==", "197, 0, 0, 40, 38), [10, 145, 1084, 684] if", "versa return HsvFilter(19, 91, 107, 31, 168, 181, 0, 11,", "0, 0, 0), [10, 145, 1084, 684] if object_name ==", "110, 97, 189, 255, 0, 0, 0, 0), [1080, 660,", "= vSub # Putting this here out of the way", "0, 5, 12), [10, 145, 1084, 684] if object_name ==", "position to look def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]: if", "== \"button_explore_again\": return None, [] if object_name == \"button_choose_map\": return", "73, 255, 255, 0, 0, 0, 0), [485, 280, 900,", "21) if object_name == \"inventory_purple_item\": return HsvFilter(126, 153, 0, 255,", "0, 0, 0), [10, 145, 1084, 684] # Buttons for", "255, 0, 0, 0, 0), [10, 145, 1084, 684] #", "58, 255, 255, 0, 12, 0, 0), [600, 222, 700,", "123, 29, 255, 255, 0, 0, 0, 20), [415, 533,", "0, 0, 0), [1100, 50, 1260, 210] if object_name ==", "# Buttons for clicking, known positions if object_name == \"button_explore_again\":", "string case it will return the optimal filter and the", "145, 1084, 684] return HsvFilter(0, 0, 0, 255, 255, 255,", "149, 140, 255, 255, 255, 0, 0, 0, 0) if", "HsvFilter(78, 110, 110, 97, 189, 255, 0, 0, 0, 0),", "object_name == \"loot_distant\": return HsvFilter(14, 116, 33, 32, 210, 59,", "object_name == \"loot_chest_normal\": # This is a difficult one to", "8, 255, 255, 0, 66, 30, 34), [485, 280, 900,", "\"player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(31, 94, 86, 73, 255, 255,", "== \"button_repair_confirm\": return None, [] if object_name == \"inv_grid_location\": return", "if object_name == \"dungeon_check\": return HsvFilter(0, 73, 94, 106, 255,", "38), [10, 145, 1084, 684] if object_name == \"message_boss_encounter\": return", "[630, 520, 1120, 680] if object_name == \"display_boss_name_and_healthbar\": return HsvFilter(0,", "one to separate return HsvFilter(0, 34, 38, 28, 152, 124,", "This is a dangerous one as it can barely #", "0, 20), [415, 533, 888, 700] if object_name == \"loot_chest_normal\":", "1084, 684] # Buttons for clicking, known positions if object_name", "== \"enemy_map_loc\": #print(\"Using enemy location filter\") if kwargs.get(\"big_map\"): return HsvFilter(0,", "object_name == \"enemy_arrow\": return HsvFilter(0, 0, 0, 255, 255, 255,", "255, 255, 0, 0, 0, 0), [1100, 50, 1260, 210]", "return HsvFilter(0, 155, 135, 31, 240, 217, 0, 0, 0,", 
"HsvFilter(0, 0, 214, 179, 65, 255, 0, 0, 0, 17),", "1084, 684] if object_name == \"loot_near\": return HsvFilter(0, 155, 135,", "98, 92, 105, 255, 225, 0, 54, 24, 38) if", "HsvFilter(0, 92, 128, 13, 255, 255, 0, 0, 0, 0),", "= sMin self.vMin = vMin self.hMax = hMax self.sMax =", "62, 88, 217, 246, 0, 0, 0, 0) if object_name", "return None, [] if object_name == \"button_inv_consume\": return None, []", "734] return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0,", "0), [630, 520, 1120, 680] if object_name == \"display_boss_name_and_healthbar\": return", "object_name == \"button_open_store\": return None, [] if object_name == \"button_go_town\":", "0, 255, 255, 255, 0, 0, 0, 0), [3, 32,", "420, 855, 710] if object_name == \"prompt_press_x_pickup\": return HsvFilter(78, 110,", "\"message_section_cleared\": return HsvFilter(0, 0, 214, 179, 65, 255, 0, 0,", "210, 59, 16, 0, 3, 0), [10, 145, 1084, 684]", "return HsvFilter(0, 0, 214, 179, 65, 255, 0, 0, 0,", "object_name == \"loot_chest_special\": if kwargs.get(\"big_map\"): return HsvFilter(0, 0, 0, 255,", "0, 0) if object_name == \"loot_chest_special\": if kwargs.get(\"big_map\"): return HsvFilter(0,", "[485, 280, 900, 734] return HsvFilter(16, 172, 194, 32, 255,", "89, 58, 255, 255, 0, 12, 0, 0), [600, 222,", "None, [] if object_name == \"button_open_store\": return None, [] if", "will return the optimal filter and the correct position to", "return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30,", "0, 0) if object_name == \"event_otherworld\": return HsvFilter(0, 0, 0,", "\"message_go\": return HsvFilter(32, 114, 89, 58, 255, 255, 0, 12,", "172, 194, 32, 255, 255, 0, 0, 70, 37), [485,", "295, 1188, 368] if object_name == \"enemy_map_loc\": #print(\"Using enemy location", "106, 255, 255, 0, 0, 0, 0), [1083, 295, 1188,", "0) if object_name == \"button_repair\": return None, [208, 600] #", "255, 0, 0, 0, 0), [1100, 50, 1260, 210] if", "255, 255, 0, 66, 30, 34), [1100, 50, 1260, 210]", "== \"message_boss_encounter\": return HsvFilter(0, 92, 128, 13, 255, 255, 0,", "50, 1260, 210] if object_name == \"gate_map_pos\": # This is", "return HsvFilter(72, 98, 92, 105, 255, 225, 0, 54, 24,", "0, 0) if object_name == \"inventory_blue_item\": return HsvFilter(79, 169, 0,", "255, 0, 0, 0, 0) if object_name == \"loot_chest_special\": if", "\"event_card_trade\": return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "900, 734] return HsvFilter(31, 94, 86, 73, 255, 255, 0,", "look def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]: if object_name is", "# For a given item string case it will return", "object_name == \"display_boss_name_and_healthbar\": return HsvFilter(0, 92, 123, 29, 255, 255,", "[10, 145, 1084, 684] if object_name == \"map_outline\": if kwargs.get(\"big_map\"):", "181, 0, 11, 32, 21) if object_name == \"inventory_purple_item\": return", "114, 89, 58, 255, 255, 0, 12, 0, 0), [600,", "73, 255, 255, 0, 0, 0, 0), [1100, 50, 1260,", "900, 734] return HsvFilter(0, 128, 82, 8, 255, 255, 0,", "710] if object_name == \"prompt_press_x_pickup\": return HsvFilter(78, 110, 110, 97,", "145, 1084, 684] if object_name == \"map_outline\": if kwargs.get(\"big_map\"): return", "= hMin self.sMin = sMin self.vMin = vMin self.hMax =", "case it will return the optimal filter and the correct", "0, 61, 255, 255, 0, 0, 0, 0) if object_name", "HsvFilter(0, 124, 62, 88, 217, 246, 0, 0, 0, 0)", "object_name == \"event_otherworld\": return HsvFilter(0, 0, 0, 255, 255, 255,", "189, 255, 0, 0, 0, 0), [1080, 660, 1255, 725]", "HsvFilter(79, 149, 140, 
255, 255, 255, 0, 0, 0, 0)", "a given item string case it will return the optimal", "separate if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255, 255,", "0, 0, 0) if object_name == \"loot_chest_special\": if kwargs.get(\"big_map\"): return", "0, 255, 255, 255, 0, 0, 0, 0), [10, 145,", "hMax=None, sMax=None, vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None): self.hMin = hMin", "0) if object_name == \"inventory_blue_item\": return HsvFilter(79, 169, 0, 109,", "object_name == \"button_inv_other\": return None, [] if object_name == \"button_repair_confirm\":", "return None, [] if object_name == \"button_inv_other\": return None, []", "return None, [] if object_name == \"button_repair_confirm\": return None, []", "object_name == \"prompt_move_reward_screen\": return HsvFilter(72, 98, 92, 105, 255, 225,", "246, 0, 0, 0, 0) if object_name == \"inventory_green_item\": return", "0) if object_name == \"loot_chest_special\": if kwargs.get(\"big_map\"): return HsvFilter(0, 0,", "0), [1080, 660, 1255, 725] if object_name == \"message_section_cleared\": return", "return HsvFilter(79, 169, 0, 109, 246, 188, 0, 0, 0,", "107, 31, 168, 181, 0, 11, 32, 21) if object_name", "\"button_explore_again\": return None, [] if object_name == \"button_choose_map\": return None,", "filter and the correct position to look def grab_object_preset(object_name=None, **kwargs)", "0), [735, 32, 1085, 100] if object_name == \"enemy_arrow\": return", "17), [464, 600, 855, 680] if object_name == \"message_go\": return", "HsvFilter(0, 73, 94, 106, 255, 255, 0, 0, 0, 0),", "\"loot_chest_special\": if kwargs.get(\"big_map\"): return HsvFilter(0, 0, 0, 255, 255, 255,", "92, 123, 29, 255, 255, 0, 0, 0, 20), [415,", "This is a very difficult one to separate if kwargs.get(\"big_map\"):", "\"inventory_green_item\": return HsvFilter(37, 147, 0, 61, 255, 255, 0, 0,", "item string case it will return the optimal filter and", "600, 855, 680] if object_name == \"message_go\": return HsvFilter(32, 114,", "== \"button_inv_other\": return None, [] if object_name == \"button_repair_confirm\": return", "return HsvFilter(126, 153, 0, 255, 255, 255, 0, 0, 0,", "0, 0, 0), [460, 420, 855, 710] if object_name ==", "sMax self.vMax = vMax self.sAdd = sAdd self.sSub = sSub", "#print(\"Using enemy location filter\") if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82,", "140, 255, 255, 255, 0, 0, 0, 0) if object_name", "0, 54, 24, 38) if object_name == \"prompt_select_card\": return HsvFilter(79,", "== \"gate_map_pos\": # This is a very difficult one to", "if object_name == \"other_player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(16, 172, 194,", "be done later if object_name == \"event_card_trade\": return HsvFilter(0, 0,", "return HsvFilter(14, 116, 33, 32, 210, 59, 16, 0, 3,", "an HSV filter class HsvFilter: def __init__(self, hMin=None, sMin=None, vMin=None,", "50, 1260, 210] if object_name == \"loot_distant\": return HsvFilter(14, 116,", "\"inventory_blue_item\": return HsvFilter(79, 169, 0, 109, 246, 188, 0, 0,", "= sMax self.vMax = vMax self.sAdd = sAdd self.sSub =", "object_name == \"dungeon_check\": return HsvFilter(0, 73, 94, 106, 255, 255,", "HsvFilter(0, 34, 38, 28, 152, 124, 0, 0, 5, 12),", "None, [] if object_name == \"button_inv_consume\": return None, [] if", "124, 0, 0, 5, 12), [10, 145, 1084, 684] if", "return HsvFilter(49, 0, 139, 91, 30, 197, 0, 0, 40,", "== \"enemy_nametag\": return HsvFilter(49, 0, 139, 91, 30, 197, 0,", "66, 30, 34), [1100, 50, 1260, 210] if object_name ==", "50, 1260, 210] if 
object_name == \"player_map_loc\": if kwargs.get(\"big_map\"): return", "a chonk # For a given item string case it", "vSub # Putting this here out of the way as", "== \"player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(31, 94, 86, 73, 255,", "sAdd self.sSub = sSub self.vAdd = vAdd self.vSub = vSub", "0), [10, 145, 1084, 684] if object_name == \"cards\": return", "255, 255, 0, 0, 0, 0) if object_name == \"event_chest_special_appear\":", "location filter\") if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255,", "Buttons for clicking, known positions if object_name == \"button_explore_again\": return", "self.sSub = sSub self.vAdd = vAdd self.vSub = vSub #", "a very difficult one to separate if kwargs.get(\"big_map\"): return HsvFilter(0,", "== \"event_chest_special_appear\": return HsvFilter(0, 124, 62, 88, 217, 246, 0,", "object_name == \"button_go_town\": return None, [] if object_name == \"button_inv_equipment\":", "0, 0), [460, 420, 855, 710] if object_name == \"prompt_press_x_pickup\":", "105, 255, 225, 0, 54, 24, 38) if object_name ==", "barely # distinguish against green items and vice versa return", "\"gate_map_pos\": # This is a very difficult one to separate", "[415, 533, 888, 700] if object_name == \"loot_chest_normal\": # This", "0, 214, 179, 65, 255, 0, 0, 0, 17), [464,", "object_name == \"enemy_map_loc\": #print(\"Using enemy location filter\") if kwargs.get(\"big_map\"): return", "[460, 420, 855, 710] if object_name == \"prompt_press_x_pickup\": return HsvFilter(78,", "== \"enemy_arrow\": return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "None, [] if object_name == \"inv_grid_location\": return None, [533+44*kwargs.get(\"col\"), 277+44*kwargs.get(\"row\")]", "145, 1084, 684] if object_name == \"loot_near\": return HsvFilter(0, 155,", "34, 38, 28, 152, 124, 0, 0, 5, 12), [10,", "255, 255, 0, 0, 0, 0), [630, 520, 1120, 680]", "return HsvFilter(19, 91, 107, 31, 168, 181, 0, 11, 32,", "\"dungeon_check\": return HsvFilter(0, 73, 94, 106, 255, 255, 0, 0,", "[10, 145, 1084, 684] if object_name == \"loot_near\": return HsvFilter(0,", "32, 255, 255, 0, 0, 70, 37), [485, 280, 900,", "if object_name == \"event_card_trade\": return HsvFilter(0, 0, 0, 255, 255,", "kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66,", "it's a chonk # For a given item string case", "positions if object_name == \"button_explore_again\": return None, [] if object_name", "== \"prompt_move_reward_screen\": return HsvFilter(72, 98, 92, 105, 255, 225, 0,", "\"loot_distant\": return HsvFilter(14, 116, 33, 32, 210, 59, 16, 0,", "# This is a difficult one to separate return HsvFilter(0,", "a difficult one to separate return HsvFilter(0, 34, 38, 28,", "difficult one to separate return HsvFilter(0, 34, 38, 28, 152,", "hMax self.sMax = sMax self.vMax = vMax self.sAdd = sAdd", "None: #print(\"Using default filter\") return HsvFilter(0, 0, 0, 255, 255,", "a dangerous one as it can barely # distinguish against", "HsvFilter: def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None, sAdd=None,", "return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0, 70,", "out of the way as it's a chonk # For", "0, 0, 0, 0), [460, 420, 855, 710] if object_name", "0, 0), [10, 145, 1084, 684] # Buttons for clicking,", "0, 66, 30, 34), [485, 280, 900, 734] return HsvFilter(0,", "32, 210, 59, 16, 0, 3, 0), [10, 145, 1084,", "= vMin self.hMax = hMax self.sMax = sMax self.vMax =", "HsvFilter(31, 94, 86, 73, 255, 255, 0, 0, 0, 0),", "and the correct position to look def 
grab_object_preset(object_name=None, **kwargs) ->", "\"other_player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(16, 172, 194, 32, 255, 255,", "\"event_chest_special_appear\": return HsvFilter(0, 124, 62, 88, 217, 246, 0, 0,", "255, 0, 0, 0, 20), [415, 533, 888, 700] if", "object_name == \"button_inv_equipment\": return None, [] if object_name == \"button_inv_consume\":", "145, 1084, 684] if object_name == \"message_boss_encounter\": return HsvFilter(0, 92,", "self.sMin = sMin self.vMin = vMin self.hMax = hMax self.sMax", "= sSub self.vAdd = vAdd self.vSub = vSub # Putting", "is a dangerous one as it can barely # distinguish", "0, 0, 20), [415, 533, 888, 700] if object_name ==", "147, 0, 61, 255, 255, 0, 0, 0, 0) if", "These are all To be done later if object_name ==", "1120, 680] if object_name == \"display_boss_name_and_healthbar\": return HsvFilter(0, 92, 123,", "255, 0, 0, 0, 0) if object_name == \"event_otherworld\": return", "return HsvFilter(79, 149, 140, 255, 255, 255, 0, 0, 0,", "object_name == \"inventory_yellow_item\": # This is a dangerous one as", "0, 0, 0, 0), [1083, 295, 1188, 368] if object_name", "== \"loot_near\": return HsvFilter(0, 155, 135, 31, 240, 217, 0,", "== \"message_go\": return HsvFilter(32, 114, 89, 58, 255, 255, 0,", "70, 37), [485, 280, 900, 734] return HsvFilter(16, 172, 194,", "172, 194, 32, 255, 255, 0, 0, 70, 37), [1100,", "as it can barely # distinguish against green items and", "separate return HsvFilter(0, 34, 38, 28, 152, 124, 0, 0,", "[1083, 295, 1188, 368] if object_name == \"enemy_map_loc\": #print(\"Using enemy", "#print(\"Using default filter\") return HsvFilter(0, 0, 0, 255, 255, 255,", "self.vMin = vMin self.hMax = hMax self.sMax = sMax self.vMax", "return HsvFilter(0, 92, 128, 13, 255, 255, 0, 0, 0,", "255, 255, 0, 0, 0, 0), [1083, 295, 1188, 368]", "0, 0), [1083, 295, 1188, 368] if object_name == \"enemy_map_loc\":", "0, 0, 40, 38), [10, 145, 1084, 684] if object_name", "533, 888, 700] if object_name == \"loot_chest_normal\": # This is", "filter class HsvFilter: def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None,", "one as it can barely # distinguish against green items", "\"button_choose_map\": return None, [] if object_name == \"button_open_store\": return None,", "\"message_boss_encounter\": return HsvFilter(0, 92, 128, 13, 255, 255, 0, 0,", "255, 255, 0, 0, 70, 37), [1100, 50, 1260, 210]", "vAdd=None, vSub=None): self.hMin = hMin self.sMin = sMin self.vMin =", "255, 0, 0, 0, 0), [3, 32, 1280, 794] if", "1188, 368] if object_name == \"enemy_map_loc\": #print(\"Using enemy location filter\")", "0), [600, 222, 700, 275] if object_name == \"enemy_nametag\": return", "0), [3, 32, 1280, 794] if object_name == \"dungeon_check\": return", "0, 3, 0), [10, 145, 1084, 684] if object_name ==", "\"inventory_yellow_item\": # This is a dangerous one as it can", "153, 0, 255, 255, 255, 0, 0, 0, 0) if", "37), [485, 280, 900, 734] return HsvFilter(16, 172, 194, 32,", "3, 0), [10, 145, 1084, 684] if object_name == \"loot_near\":", "900, 734] return HsvFilter(16, 172, 194, 32, 255, 255, 0,", "50, 1260, 210] if object_name == \"prompt_move_reward_screen\": return HsvFilter(72, 98,", "== \"button_choose_map\": return None, [] if object_name == \"button_open_store\": return", "if object_name == \"loot_chest_normal\": # This is a difficult one", "if object_name == \"inventory_blue_item\": return HsvFilter(79, 169, 0, 109, 246,", "== \"button_go_town\": return None, [] if object_name == \"button_inv_equipment\": return", "0, 0) 
if object_name == \"inventory_green_item\": return HsvFilter(37, 147, 0,", "13, 255, 255, 0, 0, 0, 0), [630, 520, 1120,", "if object_name == \"inventory_yellow_item\": # This is a dangerous one", "object_name == \"button_repair_confirm\": return None, [] if object_name == \"inv_grid_location\":", "\"button_inv_other\": return None, [] if object_name == \"button_repair_confirm\": return None,", "the optimal filter and the correct position to look def", "object_name == \"inventory_purple_item\": return HsvFilter(126, 153, 0, 255, 255, 255,", "32, 1280, 794] if object_name == \"dungeon_check\": return HsvFilter(0, 73,", "\"loot_chest_normal\": # This is a difficult one to separate return", "is a very difficult one to separate if kwargs.get(\"big_map\"): return", "== \"dungeon_check\": return HsvFilter(0, 73, 94, 106, 255, 255, 0,", "0, 0, 0, 0) if object_name == \"loot_chest_special\": if kwargs.get(\"big_map\"):", "34), [1100, 50, 1260, 210] if object_name == \"player_map_loc\": if", "169, 0, 109, 246, 188, 0, 0, 0, 0) if", "222, 700, 275] if object_name == \"enemy_nametag\": return HsvFilter(49, 0,", "== \"loot_chest_normal\": # This is a difficult one to separate", "if object_name == \"cards\": return HsvFilter(0, 0, 0, 255, 255,", "255, 0, 0, 0, 0) if object_name == \"inventory_blue_item\": return", "255, 0, 0, 0, 0), [10, 145, 1084, 684] return", "82, 8, 255, 255, 0, 66, 30, 34), [1100, 50,", "return None, [] if object_name == \"inv_grid_location\": return None, [533+44*kwargs.get(\"col\"),", "255, 0, 0, 0, 17), [464, 600, 855, 680] if", "correct position to look def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]:", "29, 255, 255, 0, 0, 0, 20), [415, 533, 888,", "66, 30, 34), [485, 280, 900, 734] return HsvFilter(0, 128,", "list]: if object_name is None: #print(\"Using default filter\") return HsvFilter(0,", "255, 255, 255, 0, 0, 0, 0), [3, 32, 1280,", "HsvFilter(0, 155, 135, 31, 240, 217, 0, 0, 0, 0),", "32, 1085, 100] if object_name == \"enemy_arrow\": return HsvFilter(0, 0,", "if object_name == \"message_section_cleared\": return HsvFilter(0, 0, 214, 179, 65,", "\"button_go_town\": return None, [] if object_name == \"button_inv_equipment\": return None,", "240, 217, 0, 0, 0, 0), [460, 420, 855, 710]", "12, 0, 0), [600, 222, 700, 275] if object_name ==", "700, 275] if object_name == \"enemy_nametag\": return HsvFilter(49, 0, 139,", "vSub=None): self.hMin = hMin self.sMin = sMin self.vMin = vMin", "734] return HsvFilter(31, 94, 86, 73, 255, 255, 0, 0,", "225, 0, 54, 24, 38) if object_name == \"prompt_select_card\": return", "0, 0), [485, 280, 900, 734] return HsvFilter(31, 94, 86,", "[1100, 50, 1260, 210] if object_name == \"prompt_move_reward_screen\": return HsvFilter(72,", "here out of the way as it's a chonk #", "object_name == \"message_section_cleared\": return HsvFilter(0, 0, 214, 179, 65, 255,", "None, [208, 600] # These are all To be done", "\"loot_near\": return HsvFilter(0, 155, 135, 31, 240, 217, 0, 0,", "0, 0, 255, 255, 255, 0, 0, 0, 0), [3,", "if object_name == \"button_inv_other\": return None, [] if object_name ==", "the correct position to look def grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter,", "For a given item string case it will return the", "if object_name == \"button_go_town\": return None, [] if object_name ==", "== \"event_otherworld\": return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "255, 0, 0, 0, 0), [1080, 660, 1255, 725] if", "# custom data structure to hold the state of an", "to separate return 
HsvFilter(0, 34, 38, 28, 152, 124, 0,", "255, 255, 0, 0, 0, 0) if object_name == \"loot_chest_special\":", "for clicking, known positions if object_name == \"button_explore_again\": return None,", "520, 1120, 680] if object_name == \"display_boss_name_and_healthbar\": return HsvFilter(0, 92,", "-> typing.Tuple[HsvFilter, list]: if object_name is None: #print(\"Using default filter\")", "== \"display_boss_name_and_healthbar\": return HsvFilter(0, 92, 123, 29, 255, 255, 0,", "194, 32, 255, 255, 0, 0, 70, 37), [485, 280,", "0, 70, 37), [485, 280, 900, 734] return HsvFilter(16, 172,", "280, 900, 734] return HsvFilter(0, 128, 82, 8, 255, 255,", "0, 17), [464, 600, 855, 680] if object_name == \"message_go\":", "HsvFilter(49, 0, 139, 91, 30, 197, 0, 0, 40, 38),", "structure to hold the state of an HSV filter class", "None, [] if object_name == \"button_inv_other\": return None, [] if", "given item string case it will return the optimal filter", "1255, 725] if object_name == \"message_section_cleared\": return HsvFilter(0, 0, 214,", "== \"button_inv_equipment\": return None, [] if object_name == \"button_inv_consume\": return", "0, 0, 70, 37), [485, 280, 900, 734] return HsvFilter(16,", "1085, 100] if object_name == \"enemy_arrow\": return HsvFilter(0, 0, 0,", "\"button_repair_confirm\": return None, [] if object_name == \"inv_grid_location\": return None,", "== \"cards\": return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "210] if object_name == \"loot_distant\": return HsvFilter(14, 116, 33, 32,", "[] if object_name == \"button_open_store\": return None, [] if object_name", "16, 0, 3, 0), [10, 145, 1084, 684] if object_name", "145, 1084, 684] # Buttons for clicking, known positions if", "0, 0, 0, 0), [1100, 50, 1260, 210] if object_name", "if object_name == \"display_boss_name_and_healthbar\": return HsvFilter(0, 92, 123, 29, 255,", "HsvFilter(19, 91, 107, 31, 168, 181, 0, 11, 32, 21)", "object_name == \"inventory_blue_item\": return HsvFilter(79, 169, 0, 109, 246, 188,", "30, 34), [485, 280, 900, 734] return HsvFilter(0, 128, 82,", "210] if object_name == \"gate_map_pos\": # This is a very", "70, 37), [1100, 50, 1260, 210] if object_name == \"loot_distant\":", "210] if object_name == \"player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(31, 94,", "[1100, 50, 1260, 210] if object_name == \"other_player_map_loc\": if kwargs.get(\"big_map\"):", "[10, 145, 1084, 684] return HsvFilter(0, 0, 0, 255, 255,", "default filter\") return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "== \"loot_distant\": return HsvFilter(14, 116, 33, 32, 210, 59, 16,", "= sAdd self.sSub = sSub self.vAdd = vAdd self.vSub =", "grab_object_preset(object_name=None, **kwargs) -> typing.Tuple[HsvFilter, list]: if object_name is None: #print(\"Using", "HSV filter class HsvFilter: def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None,", "one to separate if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8,", "88, 217, 246, 0, 0, 0, 0) if object_name ==", "0, 0), [630, 520, 1120, 680] if object_name == \"display_boss_name_and_healthbar\":", "38) if object_name == \"prompt_select_card\": return HsvFilter(79, 149, 140, 255,", "\"inventory_purple_item\": return HsvFilter(126, 153, 0, 255, 255, 255, 0, 0,", "return None, [] if object_name == \"button_open_store\": return None, []", "152, 124, 0, 0, 5, 12), [10, 145, 1084, 684]", "== \"prompt_press_x_pickup\": return HsvFilter(78, 110, 110, 97, 189, 255, 0,", "import typing # custom data structure to hold the state", "255, 0, 0, 0, 0), [735, 32, 1085, 100] if", 
"__init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None, sAdd=None, sSub=None, vAdd=None,", "0, 0) if object_name == \"event_chest_special_appear\": return HsvFilter(0, 124, 62,", "255, 255, 0, 12, 0, 0), [600, 222, 700, 275]", "94, 86, 73, 255, 255, 0, 0, 0, 0), [1100,", "255, 0, 66, 30, 34), [1100, 50, 1260, 210] if", "94, 106, 255, 255, 0, 0, 0, 0), [1083, 295,", "== \"map_outline\": if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255,", "difficult one to separate if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82,", "can barely # distinguish against green items and vice versa", "later if object_name == \"event_card_trade\": return HsvFilter(0, 0, 0, 255,", "0, 0, 0, 0), [10, 145, 1084, 684] return HsvFilter(0,", "self.hMin = hMin self.sMin = sMin self.vMin = vMin self.hMax", "if object_name == \"button_repair\": return None, [208, 600] # These", "30, 34), [1100, 50, 1260, 210] if object_name == \"prompt_move_reward_screen\":", "[] if object_name == \"button_repair_confirm\": return None, [] if object_name", "clicking, known positions if object_name == \"button_explore_again\": return None, []", "return the optimal filter and the correct position to look", "hMin self.sMin = sMin self.vMin = vMin self.hMax = hMax", "255, 255, 0, 0, 0, 0), [485, 280, 900, 734]", "== \"button_inv_consume\": return None, [] if object_name == \"button_inv_other\": return", "[1100, 50, 1260, 210] if object_name == \"gate_map_pos\": # This", "enemy location filter\") if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8,", "168, 181, 0, 11, 32, 21) if object_name == \"inventory_purple_item\":", "hold the state of an HSV filter class HsvFilter: def", "\"button_inv_consume\": return None, [] if object_name == \"button_inv_other\": return None,", "sSub self.vAdd = vAdd self.vSub = vSub # Putting this", "0, 0, 0) if object_name == \"inventory_green_item\": return HsvFilter(37, 147,", "def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None, sAdd=None, sSub=None,", "== \"prompt_select_card\": return HsvFilter(79, 149, 140, 255, 255, 255, 0,", "HsvFilter(37, 147, 0, 61, 255, 255, 0, 0, 0, 0)", "0, 0, 70, 37), [1100, 50, 1260, 210] if object_name", "if object_name == \"loot_near\": return HsvFilter(0, 155, 135, 31, 240,", "to separate if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255,", "None, [] if object_name == \"button_go_town\": return None, [] if", "680] if object_name == \"display_boss_name_and_healthbar\": return HsvFilter(0, 92, 123, 29,", "if object_name == \"button_inv_equipment\": return None, [] if object_name ==", "1260, 210] if object_name == \"loot_distant\": return HsvFilter(14, 116, 33,", "110, 110, 97, 189, 255, 0, 0, 0, 0), [1080,", "0, 0), [1080, 660, 1255, 725] if object_name == \"message_section_cleared\":", "255, 255, 0, 0, 70, 37), [485, 280, 900, 734]", "the state of an HSV filter class HsvFilter: def __init__(self,", "0, 12, 0, 0), [600, 222, 700, 275] if object_name", "HsvFilter(0, 128, 82, 8, 255, 255, 0, 66, 30, 34),", "255, 255, 0, 0, 0, 20), [415, 533, 888, 700]", "if object_name == \"prompt_move_reward_screen\": return HsvFilter(72, 98, 92, 105, 255,", "0, 0) if object_name == \"button_repair\": return None, [208, 600]", "888, 700] if object_name == \"loot_chest_normal\": # This is a", "34), [1100, 50, 1260, 210] if object_name == \"gate_map_pos\": #", "== \"inventory_yellow_item\": # This is a dangerous one as it", "255, 225, 0, 54, 24, 38) if object_name == \"prompt_select_card\":", "0, 0, 0) if 
object_name == \"event_otherworld\": return HsvFilter(0, 0,", "34), [1100, 50, 1260, 210] if object_name == \"prompt_move_reward_screen\": return", "vAdd self.vSub = vSub # Putting this here out of", "if object_name == \"loot_chest_special\": if kwargs.get(\"big_map\"): return HsvFilter(0, 0, 0,", "object_name == \"button_explore_again\": return None, [] if object_name == \"button_choose_map\":", "vMin self.hMax = hMax self.sMax = sMax self.vMax = vMax", "\"map_outline\": if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255, 255,", "HsvFilter(0, 0, 0, 255, 255, 255, 0, 0, 0, 0),", "368] if object_name == \"enemy_map_loc\": #print(\"Using enemy location filter\") if", "[485, 280, 900, 734] return HsvFilter(0, 128, 82, 8, 255,", "it will return the optimal filter and the correct position", "794] if object_name == \"dungeon_check\": return HsvFilter(0, 73, 94, 106,", "items and vice versa return HsvFilter(19, 91, 107, 31, 168,", "object_name == \"inventory_green_item\": return HsvFilter(37, 147, 0, 61, 255, 255,", "0, 0, 0, 0), [3, 32, 1280, 794] if object_name", "0, 0, 0) if object_name == \"inventory_blue_item\": return HsvFilter(79, 169,", "0, 0, 0, 0) if object_name == \"inventory_blue_item\": return HsvFilter(79,", "None, [] if object_name == \"button_choose_map\": return None, [] if", "255, 255, 0, 0, 0, 0) if object_name == \"inventory_blue_item\":", "== \"button_open_store\": return None, [] if object_name == \"button_go_town\": return", "0, 0, 0, 0), [1080, 660, 1255, 725] if object_name", "if object_name == \"loot_distant\": return HsvFilter(14, 116, 33, 32, 210,", "dangerous one as it can barely # distinguish against green", "self.vSub = vSub # Putting this here out of the", "82, 8, 255, 255, 0, 66, 30, 34), [485, 280,", "0, 0, 17), [464, 600, 855, 680] if object_name ==", "if object_name == \"message_go\": return HsvFilter(32, 114, 89, 58, 255,", "if kwargs.get(\"big_map\"): return HsvFilter(31, 94, 86, 73, 255, 255, 0,", "object_name == \"message_go\": return HsvFilter(32, 114, 89, 58, 255, 255,", "# These are all To be done later if object_name", "return None, [] if object_name == \"button_go_town\": return None, []", "object_name == \"button_choose_map\": return None, [] if object_name == \"button_open_store\":", "\"prompt_press_x_pickup\": return HsvFilter(78, 110, 110, 97, 189, 255, 0, 0,", "0, 0, 0) if object_name == \"inventory_yellow_item\": # This is", "== \"inventory_blue_item\": return HsvFilter(79, 169, 0, 109, 246, 188, 0,", "self.sMax = sMax self.vMax = vMax self.sAdd = sAdd self.sSub", "92, 128, 13, 255, 255, 0, 0, 0, 0), [630,", "object_name == \"prompt_press_x_pickup\": return HsvFilter(78, 110, 110, 97, 189, 255,", "0) if object_name == \"event_otherworld\": return HsvFilter(0, 0, 0, 255,", "33, 32, 210, 59, 16, 0, 3, 0), [10, 145,", "== \"message_section_cleared\": return HsvFilter(0, 0, 214, 179, 65, 255, 0,", "0), [485, 280, 900, 734] return HsvFilter(31, 94, 86, 73,", "\"prompt_select_card\": return HsvFilter(79, 149, 140, 255, 255, 255, 0, 0,", "0, 0), [735, 32, 1085, 100] if object_name == \"enemy_arrow\":", "kwargs.get(\"big_map\"): return HsvFilter(0, 0, 0, 255, 255, 255, 0, 0,", "if object_name == \"enemy_map_loc\": #print(\"Using enemy location filter\") if kwargs.get(\"big_map\"):", "1280, 794] if object_name == \"dungeon_check\": return HsvFilter(0, 73, 94,", "0, 0, 0), [10, 145, 1084, 684] return HsvFilter(0, 0,", "128, 82, 8, 255, 255, 0, 66, 30, 34), [1100,", "0, 0), [3, 32, 1280, 794] if object_name == \"dungeon_check\":", "0, 0, 0, 0) 
if object_name == \"inventory_green_item\": return HsvFilter(37,", "to hold the state of an HSV filter class HsvFilter:", "1260, 210] if object_name == \"other_player_map_loc\": if kwargs.get(\"big_map\"): return HsvFilter(16,", "[735, 32, 1085, 100] if object_name == \"enemy_arrow\": return HsvFilter(0,", "[10, 145, 1084, 684] if object_name == \"cards\": return HsvFilter(0,", "725] if object_name == \"message_section_cleared\": return HsvFilter(0, 0, 214, 179,", "object_name is None: #print(\"Using default filter\") return HsvFilter(0, 0, 0,", "# distinguish against green items and vice versa return HsvFilter(19,", "91, 107, 31, 168, 181, 0, 11, 32, 21) if", "0, 0, 5, 12), [10, 145, 1084, 684] if object_name", "255, 0, 0, 0, 0), [1083, 295, 1188, 368] if", "0, 0, 0, 0), [10, 145, 1084, 684] if object_name", "[] if object_name == \"button_go_town\": return None, [] if object_name", "sMax=None, vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None): self.hMin = hMin self.sMin", "61, 255, 255, 0, 0, 0, 0) if object_name ==", "[3, 32, 1280, 794] if object_name == \"dungeon_check\": return HsvFilter(0,", "if object_name == \"inventory_green_item\": return HsvFilter(37, 147, 0, 61, 255,", "hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None, sAdd=None, sSub=None, vAdd=None, vSub=None):", "if object_name == \"map_outline\": if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82,", "24, 38) if object_name == \"prompt_select_card\": return HsvFilter(79, 149, 140,", "255, 255, 0, 66, 30, 34), [485, 280, 900, 734]", "109, 246, 188, 0, 0, 0, 0) if object_name ==", "object_name == \"loot_near\": return HsvFilter(0, 155, 135, 31, 240, 217,", "12), [10, 145, 1084, 684] if object_name == \"map_outline\": if", "if object_name == \"button_inv_consume\": return None, [] if object_name ==", "This is a difficult one to separate return HsvFilter(0, 34,", "855, 680] if object_name == \"message_go\": return HsvFilter(32, 114, 89,", "if object_name == \"inventory_purple_item\": return HsvFilter(126, 153, 0, 255, 255,", "1260, 210] if object_name == \"gate_map_pos\": # This is a", "known positions if object_name == \"button_explore_again\": return None, [] if", "self.hMax = hMax self.sMax = sMax self.vMax = vMax self.sAdd", "kwargs.get(\"big_map\"): return HsvFilter(16, 172, 194, 32, 255, 255, 0, 0,", "255, 255, 0, 0, 0, 0) if object_name == \"event_otherworld\":", "0, 0, 0, 0), [485, 280, 900, 734] return HsvFilter(31,", "[208, 600] # These are all To be done later", "155, 135, 31, 240, 217, 0, 0, 0, 0), [460,", "if kwargs.get(\"big_map\"): return HsvFilter(0, 0, 0, 255, 255, 255, 0,", "0, 0), [1100, 50, 1260, 210] if object_name == \"other_player_map_loc\":", "0, 109, 246, 188, 0, 0, 0, 0) if object_name", "734] return HsvFilter(0, 128, 82, 8, 255, 255, 0, 66,", "\"prompt_move_reward_screen\": return HsvFilter(72, 98, 92, 105, 255, 225, 0, 54,", "0, 0, 0, 20), [415, 533, 888, 700] if object_name", "filter\") if kwargs.get(\"big_map\"): return HsvFilter(0, 128, 82, 8, 255, 255,", "[1080, 660, 1255, 725] if object_name == \"message_section_cleared\": return HsvFilter(0,", "255, 0, 0, 0, 0), [630, 520, 1120, 680] if", "31, 168, 181, 0, 11, 32, 21) if object_name ==", "50, 1260, 210] if object_name == \"other_player_map_loc\": if kwargs.get(\"big_map\"): return", "255, 255, 255, 0, 0, 0, 0) if object_name ==", "= vAdd self.vSub = vSub # Putting this here out", "34), [485, 280, 900, 734] return HsvFilter(0, 128, 82, 8,", "30, 34), [1100, 50, 1260, 210] if object_name == \"player_map_loc\":", "chonk # 
For a given item string case it will", "94, 86, 73, 255, 255, 0, 0, 0, 0), [485,", "28, 152, 124, 0, 0, 5, 12), [10, 145, 1084,", "1260, 210] if object_name == \"prompt_move_reward_screen\": return HsvFilter(72, 98, 92,", "246, 188, 0, 0, 0, 0) if object_name == \"inventory_yellow_item\":", "684] if object_name == \"cards\": return HsvFilter(0, 0, 0, 255,", "return None, [] if object_name == \"button_inv_equipment\": return None, []", "against green items and vice versa return HsvFilter(19, 91, 107,", "== \"inventory_purple_item\": return HsvFilter(126, 153, 0, 255, 255, 255, 0," ]
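To make the lookup above concrete, here is a minimal usage sketch of grab_object_preset. The module name hsv_filter_presets is a hypothetical stand-in for wherever the code above lives, and the values shown in the comments follow the presets as listed above rather than any external reference.

# Usage sketch (module name is a hypothetical stand-in for the file above).
from hsv_filter_presets import grab_object_preset

# Plain lookup: returns the HSV filter plus the screen region to search.
loot_filter, loot_region = grab_object_preset("loot_near")
print(loot_filter.hMin, loot_filter.sMin, loot_filter.vMin)   # 0 155 135
print(loot_region)                                            # [460, 420, 855, 710]

# Map-related presets switch region when the enlarged map is open;
# the flag travels through **kwargs.
player_filter, player_region = grab_object_preset("player_map_loc", big_map=True)
print(player_region)                                          # [485, 280, 900, 734]

# Button presets carry no colour filter, only a click position/region.
btn_filter, btn_region = grab_object_preset("button_repair")
assert btn_filter is None and btn_region == [208, 600]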
[ "self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max ==", "== 11 def test_histogram_helper_common_n_bin_active(): # Make sure that common_n_bin works", "== 3.5 assert self.state.n_bin == 3 def test_caching(self): self.state.comp =", "2 state.comp = data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper')", "repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data =", "as np from numpy.testing import assert_allclose from echo import CallbackProperty,", "def setup_method(self, method): self.data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3,", "CallbackProperty() log = CallbackProperty(False) scale = CallbackProperty(100) self.state = SimpleState()", "def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'],", "data state.comp = data.id['x'] assert state.lower == -2 assert state.upper", "self.helper.lower == -100 assert self.helper.upper == +100 def test_change_attribute(self): self.helper.attribute", "override the existing values. data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2,", "def setup_method(self, method): self.data = Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3,", "log='log') state.data = data state.comp = data.id['x'] assert state.lower ==", "+99) self.helper.percentile = 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) # When", "self.y_id assert self.helper.value == 2.5 self.state.comp = self.x_id assert self.helper.value", "comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() percentile", "# Make sure that values are re-cached when edited manually", "ListCallbackProperty() nested = ListCallbackProperty() def test_state_serialization(): state1 = SimpleTestState() state1.a", "= self.x_id assert self.helper.lower == +100 assert self.helper.upper == -100", "= ListCallbackProperty() nested = ListCallbackProperty() def test_state_serialization(): state1 = SimpleTestState()", "assert self.state.x_min == -0.5 assert self.state.x_max == 3.5 assert self.state.n_bin", "state.n_bin = 11 state.comp = data.id['y'] assert state.n_bin == 3", "= self.y_id assert self.helper.value == 2.5 self.helper.attribute = self.x_id assert", "13 state.comp = data.id['x'] assert state.n_bin == 11 def test_limits_helper_initial_values():", "state2.a == np.datetime64(100, 'D') def test_nan_inf_minmax(): data = Data(x=[3, 1,", "\"\"\" def test_state_str_repr(): state1 = SimpleTestState() state1.a = 2 state1.b", "== -100 assert self.helper.upper == +100 def test_change_attribute(self): self.helper.attribute =", "# Changing scale mode updates the limits self.helper.percentile = 99.5", "self.helper.value == 2.5 self.state.comp = self.x_id assert self.helper.value == 42", "empty # but some attributes were set to values -", "'e', 'f', 'f', 'a'], label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State):", "lower = CallbackProperty() upper = CallbackProperty() scale = CallbackProperty() state", "= 13 state.comp = data.id['x'] assert state.n_bin == 11 def", "2 state1.b = 'hello' state1.flat = [1, 3, 4] sub_state", "'D') def test_nan_inf_minmax(): data = Data(x=[3, 1, -2, np.inf, np.nan],", "test_minmax(self): assert self.helper.lower == -100 assert self.helper.upper == +100 def", "helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', 
common_n_bin='common') state.data =", "assert state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin ==", "method): self.data = Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3, 9999), label='test_data')", "upper = CallbackProperty() percentile = CallbackProperty() log = CallbackProperty() state", "-1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log(): # Regression test for a", "When switching to custom, the last limits are retained self.helper.percentile", "= data state.comp = data.id['x'] assert state.lower == -2 assert", "= self.data def test_default_numerical(self): self.state.comp = self.data.id['x'] assert self.state.x_min ==", "not self.helper.log self.state.comp = self.x_id assert self.helper.lower == -122 assert", "self.helper._max_n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2", "layer = CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty() x_max", "state.comp = data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert", "CallbackProperty() percentile = CallbackProperty() log = CallbackProperty() state = SimpleState()", "state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin == 15", "data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') class", "11 def test_histogram_helper_common_n_bin_active(): # Make sure that common_n_bin works as", "= self.y_id assert self.helper.percentile == 99 def test_flip_button(self): self.helper.flip_limits() assert", "self.helper.lower == -122 assert self.helper.upper == 234 assert self.helper.log self.state.comp", "\"\"\" a: 2 b: hello flat: <CallbackList with 3 elements>", "log = CallbackProperty(False) scale = CallbackProperty(100) self.state = SimpleState() self.helper", "3, 10000), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer =", "self.x_id assert self.helper.value == 42 class TestStateAttributeHistogramHelper(): def setup_method(self, method):", "data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1, 2.3,", "== -35 def test_manual_edit(self): self.state.val = 42. assert self.helper.value ==", "'f', 'f', 'a'], label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer", "CallbackProperty() common = CallbackProperty() state = SimpleState() helper = StateAttributeHistogramHelper(state,", "in this case we don't want to # override the", "== 3 assert not self.helper.log self.state.comp = self.x_id assert self.helper.lower", "we modify the internal defaults rather than making a new", "Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') self.data_collection = DataCollection([self.data])", "when edited manually self.helper.percentile = \"Custom\" self.state.lower = -122 self.state.upper", "as expected if True from start data = Data(x=[-3.2, 4.3,", "[1, 3, sub_state] assert str(state1) == EXPECTED_STR.strip() assert repr(state1) ==", "values. 
data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data')", "self.helper.log self.state.comp = self.x_id assert self.helper.lower == -122 assert self.helper.upper", "2 assert self.helper.upper == 3 self.helper.attribute = self.x_id assert self.helper.lower", "= 99.5 self.state.comp = self.y_id assert self.helper.percentile == 100 self.helper.percentile", "state1.flat = [1, 3, 4] sub_state = SimpleTestState() sub_state.a =", "CallbackProperty() self.state = SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max',", "self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3 self.helper.attribute", "= [] state1.nested = [1, 3, sub_state] state2 = clone(state1)", "-122 self.state.upper = 234 self.helper.log = True assert self.helper.lower ==", "100, 10000), y=np.linspace(2, 3, 10000), label='test_data') self.data_collection = DataCollection([self.data]) class", "[] EXPECTED_STR = \"\"\" a: 2 b: hello flat: <CallbackList", "== +100 assert self.helper.upper == -100 # Make sure that", "self.x_id assert self.helper.lower == -122 assert self.helper.upper == 234 assert", "= StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data = self.data self.state.comp =", "state2.nested[2].a == 3 assert state2.nested[2].b == 'blah' assert state2.nested[2].flat ==", "x_min = CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() self.state", "= [1, 3, 4] sub_state = SimpleTestState() sub_state.a = 3", "15 def test_default_categorical(self): self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5", "== 3 self.helper.attribute = self.x_id assert self.helper.lower == -100 assert", "= 7 self.state.n_bin = 8 self.state.comp = self.data.id['y'] self.state.x_min =", "== 2 class DatetimeState(State): a = CallbackProperty() def test_state_serialization_datetime64(): state1", "== 7.2 assert self.state.n_bin == 15 def test_default_categorical(self): self.state.comp =", "self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale', log='log') self.state.data =", "7.2 assert self.state.n_bin == 4 self.state.comp = self.data.id['y'] assert self.state.x_min", "= CallbackProperty(True) state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min',", "from .test_state import clone from ..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper,", "state2.nested[0:2] == [1, 3] assert state2.nested[2].a == 3 assert state2.nested[2].b", "assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) # When switching to custom, the", "[1, 3, 4] assert state2.nested[0:2] == [1, 3] assert state2.nested[2].a", "DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower", "== 3 def test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min = 2", "self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_minmax(self): assert self.helper.lower", "= SimpleState() state.lower = 1 state.upper = 2 state.comp =", "= CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty() n_bin =", "+3 state.log = False state.percentile = 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper,", "self.helper.lower == +100 assert self.helper.upper == -100 # Make sure", "self.state.x_max == 3.5 assert 
self.state.n_bin == 3 def test_caching(self): self.state.comp", "import numpy as np from numpy.testing import assert_allclose from echo", "assert self.helper.lower == -100 assert self.helper.upper == +100 def test_change_attribute(self):", "7.2 assert self.state.n_bin == 15 def test_default_categorical(self): self.state.comp = self.data.id['y']", "def test_percentile_cached(self): # Make sure that if we change scale", "== +3 state.log = False state.percentile = 99 assert_allclose(state.lower, -1.97)", "== 3.5 assert self.state.n_bin == 4 def test_hitting_limits(self): # FIXME:", "str(state1) == EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def", "nested: <CallbackList with 3 elements> > \"\"\" def test_state_str_repr(): state1", "= [1, 3, sub_state] assert str(state1) == EXPECTED_STR.strip() assert repr(state1)", "self.helper.percentile = 99.5 self.state.comp = self.y_id assert self.helper.percentile == 100", "False state.n_bin = 13 state.comp = data.id['x'] assert state.n_bin ==", "data.id['x'] assert state.n_bin == 11 def test_histogram_helper_common_n_bin_active(): # Make sure", "attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data state.comp =", "-3.2 assert self.state.x_max == 7.2 assert self.state.n_bin == 4 self.state.comp", "self.helper.upper == -100 def test_manual_edit(self): # Make sure that values", "self.data.main_components[0] self.y_id = self.data.main_components[1] def test_value(self): assert self.helper.value == -35.", "state.upper == +3 state.log = False state.percentile = 99 assert_allclose(state.lower,", "= self.x_id assert self.helper.lower == -100 assert self.helper.upper == +100", "method): self.data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data')", "def test_nan_inf_minmax(): data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data')", "= CallbackProperty() state = SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp', #", "but no log. 
data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3,", "from echo import CallbackProperty, ListCallbackProperty from glue.core import Data, DataCollection", "upper = CallbackProperty() scale = CallbackProperty() state = SimpleState() state.comp", "self.helper.lower == +100 assert self.helper.upper == -100 def test_manual_edit(self): #", "assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile = 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper,", "CallbackProperty() val = CallbackProperty() self.state = SimpleState() self.helper = StateAttributeSingleValueHelper(self.state,", "= CallbackProperty(100) self.state = SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower',", "== -0.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 4", "assert_allclose from echo import CallbackProperty, ListCallbackProperty from glue.core import Data,", "this case we don't want to # override the existing", "1, -2, np.inf, np.nan], label='test_data') class SimpleState(State): layer = CallbackProperty()", "= \"Custom\" self.state.lower = -122 self.state.upper = 234 self.helper.log =", "z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State): layer = CallbackProperty() comp", "values - in this case we don't want to #", "# but some attributes were set to values - in", "Data, DataCollection from .test_state import clone from ..state_objects import (State,", "modes are cached on a per-attribute basis. self.helper.percentile = 99.5", "4.3, 2.2, 5.4, 7.2, -1.1, 2.3], y=['a', 'f', 'd', 'e',", "a: 2 b: hello flat: <CallbackList with 3 elements> nested:", "def test_default_numerical(self): self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert", "= 3.5 self.state.n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min", "method): self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3],", "= 99 self.state.comp = self.x_id assert self.helper.percentile == 99.5 self.state.comp", "..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a =", "SimpleTestState() sub_state.a = 3 sub_state.b = 'blah' sub_state.flat = [1,", "Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3, 9999), label='test_data') self.data_collection = DataCollection([self.data])", "assert self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100,", "self.state.x_min == 1.5 assert self.state.x_max == 3.5 assert self.state.n_bin ==", "sub_state = SimpleTestState() sub_state.a = 3 sub_state.b = 'blah' sub_state.flat", "no log. 
data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000),", "1.5 self.state.x_max = 3.5 self.state.n_bin = 3 self.state.comp = self.data.id['x']", "change attribute, the scale # modes are cached on a", "class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() val =", "TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data = Data(x=[-3.2, 4.3, 2.2, 5.4,", "state.comp = data.id['x'] state.n_bin = 9 state.comp = data.id['y'] assert", "2.5 self.state.comp = self.x_id assert self.helper.value == 42 class TestStateAttributeHistogramHelper():", "3 assert state2.nested[2].b == 'blah' assert state2.nested[2].flat == [1, 2]", "rather than making a new # state helper, but this", "100 self.helper.percentile = 99 self.state.comp = self.x_id assert self.helper.percentile ==", "state.lower = 1 state.upper = 2 state.comp = data.id['x'] helper", "self.data = Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3, 9999), label='test_data') self.data_collection", "# FIXME: here we modify the internal defaults rather than", "common_n_bin='common') state.data = data state.comp = data.id['x'] state.n_bin = 9", "= self.x_id assert self.helper.percentile == 99.5 self.state.comp = self.y_id assert", "self.state.x_max == 3.5 assert self.state.n_bin == 3 def test_histogram_helper_common_n_bin(): data", "crash if the state class had a # percentile attribute", "= DatetimeState() state1.a = np.datetime64(100, 'D') state2 = clone(state1) assert", "np.datetime64(100, 'D') state2 = clone(state1) assert state2.a == np.datetime64(100, 'D')", "[1, 3] assert state2.nested[2].a == 3 assert state2.nested[2].b == 'blah'", "assert self.state.x_max == 7 assert self.state.n_bin == 8 self.state.comp =", "= CallbackProperty() common = CallbackProperty() state = SimpleState() helper =", "= CallbackProperty() upper = CallbackProperty() state = SimpleState() state.lower =", "assert self.helper.value == -35. 
def test_change_attribute(self): self.helper.attribute = self.y_id assert", "self.state = SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data", "upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data state.comp = data.id['x'] state.n_bin", "= True state.comp = data.id['x'] assert state.n_bin == 12 state.n_bin", "Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State): layer", "SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower', upper='upper', percentile='percentile',", "== -122 assert self.helper.upper == 234 assert self.helper.log class TestStateAttributeSingleValueHelper():", "# Make sure that values were re-cached when flipping self.state.comp", "SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower = CallbackProperty()", "attribute, the scale # modes are cached on a per-attribute", "= CallbackProperty() flat = ListCallbackProperty() nested = ListCallbackProperty() def test_state_serialization():", "42 class TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data = Data(x=[-3.2, 4.3,", "label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp", "3 self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max", "data.id['z'] assert state.n_bin == 9 state.n_bin = 12 state.common =", "data.id['x'] state.n_bin = 9 state.comp = data.id['z'] assert state.n_bin ==", "1 state.upper = 2 state.comp = data.id['x'] helper = StateAttributeLimitsHelper(state,", "self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def", "= self.y_id assert self.helper.percentile == 100 self.helper.percentile = 99 self.state.comp", "self.helper.flip_limits() assert self.helper.lower == +100 assert self.helper.upper == -100 #", "self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp =", "99 self.state.comp = self.x_id assert self.helper.percentile == 99.5 self.state.comp =", "self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max == 7.2 assert", "test_state_serialization(): state1 = SimpleTestState() state1.a = 2 state1.b = 'hello'", "self.state.comp = self.x_id assert self.helper.lower == +100 assert self.helper.upper ==", "the existing values. data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3,", "== 11 def test_limits_helper_initial_values(): # Regression test for a bug", "ListCallbackProperty() def test_state_serialization(): state1 = SimpleTestState() state1.a = 2 state1.b", "for a bug that occurred if the limits cache was", "self.helper.percentile == 99.5 self.state.comp = self.y_id assert self.helper.percentile == 99", "self.state.n_bin = 8 self.state.comp = self.data.id['y'] self.state.x_min = 1.5 self.state.x_max", "= 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log(): # Regression", "percentile attribute but no log. 
data = Data(x=np.linspace(-100, 100, 10000),", "3 state.comp = data.id['z'] assert state.n_bin == 15 state.n_bin =", "= 2 self.state.x_max = 7 self.state.n_bin = 8 self.state.comp =", "self.helper.upper == +100 def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.lower", "3, 4] assert state2.nested[0:2] == [1, 3] assert state2.nested[2].a ==", "self.helper.attribute = self.x_id assert self.helper.lower == -100 assert self.helper.upper ==", "data.id['x'] state.n_bin = 9 state.comp = data.id['y'] assert state.n_bin ==", "y=np.linspace(2, 3, 9999), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer", "retained self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self):", "self.y_id = self.data.main_components[1] def test_minmax(self): assert self.helper.lower == -100 assert", "assert self.state.n_bin == 4 def test_hitting_limits(self): # FIXME: here we", "'f', 'd', 'e', 'f', 'f', 'a'], label='test_data') self.data_collection = DataCollection([self.data])", "lower = CallbackProperty() upper = CallbackProperty() state = SimpleState() state.lower", "sure that if we change scale and change attribute, the", "sub_state.a = 3 sub_state.b = 'blah' sub_state.flat = [1, 2]", "self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max ==", "state.log = True assert state.lower == +1 assert state.upper ==", "log = CallbackProperty() state = SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp',", "elements> \"\"\" EXPECTED_REPR = \"\"\" <SimpleTestState a: 2 b: hello", "True from start data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f',", "layer = CallbackProperty() comp = CallbackProperty() val = CallbackProperty() self.state", "2 assert self.helper.upper == 3 self.state.comp = self.x_id assert self.helper.lower", "2.3], y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data') self.data_collection", "assert self.helper.lower == -100 assert self.helper.upper == +100 def test_change_percentile(self):", "comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() state", "self.helper.percentile == 99 def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower == +100", "3.5 self.state.n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min ==", "Make sure that if we change scale and change attribute,", "setup_method(self, method): self.data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000),", "+100 assert self.helper.upper == -100 # Make sure that values", "label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min", "case we don't want to # override the existing values.", "== -100 assert self.helper.upper == +100 def test_change_percentile(self): # Changing", "== +1 assert state.upper == +3 state.log = False state.percentile", "test_limits_helper_initial_values(): # Regression test for a bug that occurred if", "from ..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a", "2 b: hello flat: <CallbackList with 3 elements> nested: <CallbackList", "\"\"\" EXPECTED_REPR = \"\"\" <SimpleTestState a: 2 b: hello flat:", "Make sure that values were re-cached when flipping self.state.comp =", "42. 
assert self.helper.value == 42 self.state.comp = self.y_id assert self.helper.value", "self.helper.upper == 234 assert self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self, method):", "[1, 3, sub_state] state2 = clone(state1) assert state2.a == 2", "4] sub_state = SimpleTestState() sub_state.a = 3 sub_state.b = 'blah'", "self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3 self.state.comp", "42 self.state.comp = self.y_id assert self.helper.value == 2.5 self.state.comp =", "was empty # but some attributes were set to values", "want to # override the existing values. data = Data(x=np.linspace(-100,", "= 9 state.comp = data.id['y'] assert state.n_bin == 3 state.comp", "<reponame>HPLegion/glue import numpy as np from numpy.testing import assert_allclose from", "+90) # When switching to custom, the last limits are", "== +100 def test_change_percentile(self): # Changing scale mode updates the", "assert self.state.x_max == 7.2 assert self.state.n_bin == 15 def test_default_categorical(self):", "= CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty() x_max =", "a crash if the state class had a # percentile", "self.state.x_max == 7 assert self.state.n_bin == 8 self.state.comp = self.data.id['y']", "b = CallbackProperty() flat = ListCallbackProperty() nested = ListCallbackProperty() def", "3 self.helper.attribute = self.x_id assert self.helper.lower == -100 assert self.helper.upper", "FIXME: here we modify the internal defaults rather than making", "CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty()", "-90) assert_allclose(self.helper.upper, +90) # When switching to custom, the last", "== 100 self.helper.percentile = 99 self.state.comp = self.x_id assert self.helper.percentile", "np.datetime64(100, 'D') def test_nan_inf_minmax(): data = Data(x=[3, 1, -2, np.inf,", "== 3 assert state2.nested[2].b == 'blah' assert state2.nested[2].flat == [1,", "assert self.helper.lower == 2 assert self.helper.upper == 3 assert not", "= CallbackProperty() upper = CallbackProperty() log = CallbackProperty(False) scale =", "== EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self,", "assert self.helper.lower == 2 assert self.helper.upper == 3 self.helper.attribute =", "and change attribute, the scale # modes are cached on", "90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) # When switching to custom,", "assert not self.helper.log self.state.comp = self.x_id assert self.helper.lower == -122", "# state helper, but this could be improved self.helper._default_n_bin =", "re-cached when edited manually self.helper.percentile = \"Custom\" self.state.lower = -122", "== -2 assert state.upper == +3 state.log = True assert", "we change scale and change attribute, the scale # modes", "assert state.n_bin == 11 def test_limits_helper_initial_values(): # Regression test for", "# Make sure that common_n_bin works as expected if True", "-99) assert_allclose(self.helper.upper, +99) self.helper.percentile = 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90)", "= CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() self.state =", "state.n_bin == 11 def test_limits_helper_initial_values(): # Regression test for a", "SimpleState() state.lower = 1 state.upper = 2 state.comp = data.id['x']", "class SimpleTestState(State): a = CallbackProperty() b = CallbackProperty() flat 
=", "helper, but this could be improved self.helper._default_n_bin = 4 self.helper._max_n_bin", "self.data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') self.data_collection", "def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.value == 2.5 self.helper.attribute", "clone from ..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State):", "= self.data.main_components[0] self.y_id = self.data.main_components[1] def test_value(self): assert self.helper.value ==", "3 elements> > \"\"\" def test_state_str_repr(): state1 = SimpleTestState() state1.a", "function=np.nanmedian, value='val') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id =", "'blah' sub_state.flat = [1, 2] sub_state.nested = [] state1.nested =", "attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data = self.data def test_default_numerical(self): self.state.comp", "[] state1.nested = [1, 3, sub_state] state2 = clone(state1) assert", "updates the limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5)", "7 assert self.state.n_bin == 8 self.state.comp = self.data.id['y'] assert self.state.x_min", "are re-cached when edited manually self.helper.percentile = \"Custom\" self.state.lower =", "= self.x_id assert self.helper.lower == -122 assert self.helper.upper == 234", "CallbackProperty() state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max',", "self.helper.percentile = \"Custom\" self.state.lower = -122 self.state.upper = 234 self.helper.log", "= data.id['x'] assert state.n_bin == 12 state.n_bin = 11 state.comp", "expected if True from start data = Data(x=[-3.2, 4.3, 2.2],", "EXPECTED_STR = \"\"\" a: 2 b: hello flat: <CallbackList with", "'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State): layer =", "'hello' state1.flat = [1, 3, 4] sub_state = SimpleTestState() sub_state.a", "== 15 state.n_bin = 12 state.common = True state.comp =", "import Data, DataCollection from .test_state import clone from ..state_objects import", "self.y_id = self.data.main_components[1] def test_value(self): assert self.helper.value == -35. 
def", "= self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max == 7.2", "def test_state_str_repr(): state1 = SimpleTestState() state1.a = 2 state1.b =", "TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2,", "self.data.main_components[1] def test_minmax(self): assert self.helper.lower == -100 assert self.helper.upper ==", "CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty() state = SimpleState()", "assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper,", "state.comp = data.id['x'] state.n_bin = 9 state.comp = data.id['z'] assert", "= CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() state =", "lower='x_min', upper='x_max', n_bin='n_bin') self.state.data = self.data def test_default_numerical(self): self.state.comp =", "= SimpleState() state.comp = data.id['x'] state.lower = 2 state.upper =", "to values - in this case we don't want to", "= CallbackProperty() b = CallbackProperty() flat = ListCallbackProperty() nested =", "= 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile = 90 assert_allclose(self.helper.lower,", "assert self.helper.percentile == 99.5 self.state.comp = self.y_id assert self.helper.percentile ==", "= 2 state1.b = 'hello' state1.flat = [1, 3, 4]", "self.helper.lower == 2 assert self.helper.upper == 3 self.helper.attribute = self.x_id", "== +100 assert self.helper.upper == -100 def test_manual_edit(self): # Make", "-35 def test_manual_edit(self): self.state.val = 42. assert self.helper.value == 42", "new # state helper, but this could be improved self.helper._default_n_bin", "3 elements> \"\"\" EXPECTED_REPR = \"\"\" <SimpleTestState a: 2 b:", "= Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State):", "= clone(state1) assert state2.a == 2 assert state2.b == 'hello'", "= [1, 3, 4] sub_state = SimpleTestState() state1.nested = [1,", "modify the internal defaults rather than making a new #", "assert self.state.x_max == 7.2 assert self.state.n_bin == 4 self.state.comp =", "-0.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 3 def", "[1, 3, 4] sub_state = SimpleTestState() sub_state.a = 3 sub_state.b", "sub_state = SimpleTestState() state1.nested = [1, 3, sub_state] assert str(state1)", "state.comp = data.id['x'] assert state.n_bin == 12 state.n_bin = 11", "could be improved self.helper._default_n_bin = 4 self.helper._max_n_bin = 3 self.state.comp", "state.comp = data.id['x'] assert state.lower == -2 assert state.upper ==", "had a # percentile attribute but no log. data =", "cached on a per-attribute basis. 
self.helper.percentile = 99.5 self.state.comp =", "CallbackProperty() flat = ListCallbackProperty() nested = ListCallbackProperty() def test_state_serialization(): state1", "switching to custom, the last limits are retained self.helper.percentile =", "3 state.comp = data.id['z'] assert state.n_bin == 11 state.common =", "from numpy.testing import assert_allclose from echo import CallbackProperty, ListCallbackProperty from", "value='val') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0]", "= Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3], y=['a', 'f',", "self.state.x_min == -0.5 assert self.state.x_max == 3.5 assert self.state.n_bin ==", "here we modify the internal defaults rather than making a", "import assert_allclose from echo import CallbackProperty, ListCallbackProperty from glue.core import", "StateAttributeHistogramHelper) class SimpleTestState(State): a = CallbackProperty() b = CallbackProperty() flat", "self.data.id['x'] assert self.state.x_min == 2 assert self.state.x_max == 7 assert", "== 2 assert self.state.x_max == 7 assert self.state.n_bin == 8", "self.state.n_bin == 3 def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3, 2.2],", "y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State): layer", "that occurred if the limits cache was empty # but", "defaults rather than making a new # state helper, but", "'hello' assert state2.flat == [1, 3, 4] assert state2.nested[0:2] ==", "assert_allclose(self.helper.upper, +90) # When switching to custom, the last limits", "edited manually self.helper.percentile = \"Custom\" self.state.lower = -122 self.state.upper =", "\"Custom\" self.state.lower = -122 self.state.upper = 234 self.helper.log = True", "= CallbackProperty() self.state = SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min',", "CallbackProperty() def test_state_serialization_datetime64(): state1 = DatetimeState() state1.a = np.datetime64(100, 'D')", "CallbackProperty() log = CallbackProperty() state = SimpleState() helper = StateAttributeLimitsHelper(state,", "the last limits are retained self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower, -90)", "state.n_bin == 11 def test_histogram_helper_common_n_bin_active(): # Make sure that common_n_bin", "= True assert state.lower == +1 assert state.upper == +3", "CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() scale = CallbackProperty()", "= SimpleTestState() state1.nested = [1, 3, sub_state] assert str(state1) ==", "self.data def test_default_numerical(self): self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2", "= 11 state.comp = data.id['y'] assert state.n_bin == 3 state.comp", "state2 = clone(state1) assert state2.a == np.datetime64(100, 'D') def test_nan_inf_minmax():", "CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() state = SimpleState()", "assert str(state1) == EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper():", "import clone from ..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class", "state1.nested = [1, 3, sub_state] state2 = clone(state1) assert state2.a", "= CallbackProperty() log = CallbackProperty(False) scale = CallbackProperty(100) self.state =", "= self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max == 3.5", "upper = CallbackProperty() state = 
SimpleState() state.lower = 1 state.upper", "state2.flat == [1, 3, 4] assert state2.nested[0:2] == [1, 3]", "x_max = CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty() state", "== 12 state.n_bin = 11 state.comp = data.id['y'] assert state.n_bin", "self.state.x_min == 2 assert self.state.x_max == 7 assert self.state.n_bin ==", "15 state.n_bin = 12 state.common = True state.comp = data.id['x']", "self.helper.percentile = 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile = 90", "flat = ListCallbackProperty() nested = ListCallbackProperty() def test_state_serialization(): state1 =", "== 3.5 assert self.state.n_bin == 3 def test_histogram_helper_common_n_bin(): data =", "common = CallbackProperty() state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp',", "11 def test_limits_helper_initial_values(): # Regression test for a bug that", "than making a new # state helper, but this could", "flat: <CallbackList with 3 elements> nested: <CallbackList with 3 elements>", "= self.data.id['y'] assert self.state.x_min == 1.5 assert self.state.x_max == 3.5", "= False state.n_bin = 13 state.comp = data.id['x'] assert state.n_bin", "== 2.5 self.helper.attribute = self.x_id assert self.helper.value == -35 def", "(State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a = CallbackProperty() b", "occurred if the limits cache was empty # but some", "= CallbackProperty() state = SimpleState() state.comp = data.id['x'] state.lower =", "self.helper.upper == 234 assert self.helper.log self.state.comp = self.y_id assert self.helper.lower", "9 state.n_bin = 12 state.common = True state.comp = data.id['x']", "-2 assert state.upper == +3 state.log = True assert state.lower", "# Regression test for a bug that occurred if the", "self.helper.lower == -122 assert self.helper.upper == 234 assert self.helper.log class", "test_percentile_no_log(): # Regression test for a bug that caused a", "def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower == +100 assert self.helper.upper ==", "self.helper.lower == -100 assert self.helper.upper == +100 def test_change_percentile(self): #", "np from numpy.testing import assert_allclose from echo import CallbackProperty, ListCallbackProperty", "setup_method(self, method): self.data = Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3, 9999),", "= True assert self.helper.lower == -122 assert self.helper.upper == 234", "val = CallbackProperty() self.state = SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp',", "state.n_bin == 12 state.n_bin = 11 state.comp = data.id['y'] assert", "data.id['z'] assert state.n_bin == 11 state.common = False state.n_bin =", "were set to values - in this case we don't", "= CallbackProperty(False) scale = CallbackProperty(100) self.state = SimpleState() self.helper =", "== 234 assert self.helper.log self.state.comp = self.y_id assert self.helper.lower ==", "DataCollection from .test_state import clone from ..state_objects import (State, StateAttributeLimitsHelper,", "elements> > \"\"\" def test_state_str_repr(): state1 = SimpleTestState() state1.a =", "# modes are cached on a per-attribute basis. 
self.helper.percentile =", "Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3], y=['a', 'f', 'd',", "state.upper = 4 helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper', percentile='scale')", "assert self.helper.lower == +100 assert self.helper.upper == -100 # Make", "attribute='comp', lower='lower', upper='upper', percentile='scale', log='log') self.state.data = self.data self.state.comp =", "8 self.state.comp = self.data.id['y'] self.state.x_min = 1.5 self.state.x_max = 3.5", "test_default_categorical(self): self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max", "assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): # Make sure that if we", "x_min = CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() common", "= CallbackProperty() n_bin = CallbackProperty() self.state = SimpleState() self.helper =", "Changing scale mode updates the limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower,", "helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert helper.lower == 1", "True state.comp = data.id['x'] assert state.n_bin == 12 state.n_bin =", "assert state2.nested[2].b == 'blah' assert state2.nested[2].flat == [1, 2] assert", "upper = CallbackProperty() log = CallbackProperty(False) scale = CallbackProperty(100) self.state", "test_state_str_repr(): state1 = SimpleTestState() state1.a = 2 state1.b = 'hello'", "assert self.state.n_bin == 8 self.state.comp = self.data.id['y'] assert self.state.x_min ==", "state.n_bin = 9 state.comp = data.id['y'] assert state.n_bin == 3", "clone(state1) assert state2.a == np.datetime64(100, 'D') def test_nan_inf_minmax(): data =", "helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower', upper='upper', percentile='percentile', log='log')", "state.comp = data.id['x'] assert state.n_bin == 11 def test_histogram_helper_common_n_bin_active(): #", "11 state.comp = data.id['y'] assert state.n_bin == 3 state.comp =", "from start data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'],", "state.upper = 2 state.comp = data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp',", "30, 9999), y=np.linspace(2, 3, 9999), label='test_data') self.data_collection = DataCollection([self.data]) class", "y=np.linspace(2, 3, 10000), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer", "self.helper.value == -35. 
def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.value", "= self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id =", "3 def test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min = 2 self.state.x_max", "StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a = CallbackProperty() b =", "= CallbackProperty() log = CallbackProperty() state = SimpleState() helper =", "3, 10000), label='test_data') class SimpleState(State): layer = CallbackProperty() comp =", "state.comp = data.id['x'] state.lower = 2 state.upper = 4 helper", "== 2 assert self.helper.upper == 3 self.state.comp = self.x_id assert", "test_percentile_cached(self): # Make sure that if we change scale and", "change scale and change attribute, the scale # modes are", "assert self.helper.value == 42 self.state.comp = self.y_id assert self.helper.value ==", "for a bug that caused a crash if the state", "-122 assert self.helper.upper == 234 assert self.helper.log self.state.comp = self.y_id", "== 234 assert self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data", "1 assert helper.upper == 2 class DatetimeState(State): a = CallbackProperty()", "> \"\"\" def test_state_str_repr(): state1 = SimpleTestState() state1.a = 2", "set to values - in this case we don't want", "sub_state.nested = [] state1.nested = [1, 3, sub_state] state2 =", "self.helper._default_n_bin = 4 self.helper._max_n_bin = 3 self.state.comp = self.data.id['x'] assert", "= data.id['z'] assert state.n_bin == 11 state.common = False state.n_bin", "\"\"\" <SimpleTestState a: 2 b: hello flat: <CallbackList with 3", "99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower, -99)", "state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin',", "CallbackProperty() state = SimpleState() state.lower = 1 state.upper = 2", "np.inf, np.nan], label='test_data') class SimpleState(State): layer = CallbackProperty() comp =", "n_bin = CallbackProperty() common = CallbackProperty() state = SimpleState() helper", "log. data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data')", "self.state.comp = self.data.id['x'] assert self.state.x_min == 2 assert self.state.x_max ==", "= SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common')", "= 12 state.common = True state.comp = data.id['x'] assert state.n_bin", "self.state.x_min == -3.2 assert self.state.x_max == 7.2 assert self.state.n_bin ==", "state.n_bin = 13 state.comp = data.id['x'] assert state.n_bin == 11", "10000), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty()", "3 sub_state.b = 'blah' sub_state.flat = [1, 2] sub_state.nested =", "self.state.comp = self.data.id['y'] self.state.x_min = 1.5 self.state.x_max = 3.5 self.state.n_bin", "self.x_id assert self.helper.lower == +100 assert self.helper.upper == -100 def", "== -35. 
def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.value ==", "== 15 def test_default_categorical(self): self.state.comp = self.data.id['y'] assert self.state.x_min ==", "clone(state1) assert state2.a == 2 assert state2.b == 'hello' assert", "99 def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower == +100 assert self.helper.upper", "assert self.helper.lower == +100 assert self.helper.upper == -100 def test_manual_edit(self):", "test_value(self): assert self.helper.value == -35. def test_change_attribute(self): self.helper.attribute = self.y_id", "limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile =", "== [] EXPECTED_STR = \"\"\" a: 2 b: hello flat:", "test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1,", "= CallbackProperty() state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min',", "scale = CallbackProperty() state = SimpleState() state.comp = data.id['x'] state.lower", "CallbackProperty() scale = CallbackProperty() state = SimpleState() state.comp = data.id['x']", "self.helper.upper == -100 # Make sure that values were re-cached", "9999), y=np.linspace(2, 3, 9999), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State):", "CallbackProperty() comp = CallbackProperty() val = CallbackProperty() self.state = SimpleState()", "self.helper.value == -35 def test_manual_edit(self): self.state.val = 42. assert self.helper.value", "CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() self.state = SimpleState()", "3, 9999), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer =", "helper.lower == 1 assert helper.upper == 2 class DatetimeState(State): a", "but some attributes were set to values - in this", "3] assert state2.nested[2].a == 3 assert state2.nested[2].b == 'blah' assert", "= CallbackProperty() val = CallbackProperty() self.state = SimpleState() self.helper =", "= data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert helper.lower", "limits cache was empty # but some attributes were set", "SimpleTestState() state1.a = 2 state1.b = 'hello' state1.flat = [1,", "data.id['x'] assert state.lower == -2 assert state.upper == +3 state.log", "state.upper == +3 state.log = True assert state.lower == +1", "self.helper.lower == 2 assert self.helper.upper == 3 self.state.comp = self.x_id", "sub_state] state2 = clone(state1) assert state2.a == 2 assert state2.b", "cache was empty # but some attributes were set to", "numpy.testing import assert_allclose from echo import CallbackProperty, ListCallbackProperty from glue.core", "assert self.helper.upper == 3 self.helper.attribute = self.x_id assert self.helper.lower ==", "True assert self.helper.lower == -122 assert self.helper.upper == 234 assert", "self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_minmax(self): assert", "assert self.helper.value == -35 def test_manual_edit(self): self.state.val = 42. 
assert", "if the limits cache was empty # but some attributes", "from glue.core import Data, DataCollection from .test_state import clone from", "2.2, 5.4, 7.2, -1.1, 2.3], y=['a', 'f', 'd', 'e', 'f',", "DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min", "upper='upper') assert helper.lower == 1 assert helper.upper == 2 class", "flipping self.state.comp = self.y_id assert self.helper.lower == 2 assert self.helper.upper", "state.comp = data.id['x'] assert state.n_bin == 11 def test_limits_helper_initial_values(): #", "== +3 state.log = True assert state.lower == +1 assert", "state.n_bin = 12 state.common = True state.comp = data.id['x'] assert", "lower = CallbackProperty() upper = CallbackProperty() percentile = CallbackProperty() log", "hello flat: <CallbackList with 3 elements> nested: <CallbackList with 3", "the limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile", "= CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty() state =", "a bug that caused a crash if the state class", "== 8 self.state.comp = self.data.id['y'] assert self.state.x_min == 1.5 assert", "per-attribute basis. self.helper.percentile = 99.5 self.state.comp = self.y_id assert self.helper.percentile", "comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() log", "= self.y_id assert self.helper.value == 2.5 self.state.comp = self.x_id assert", "self.state = SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin')", "2 assert self.helper.upper == 3 assert not self.helper.log self.state.comp =", "= self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3", "3 self.state.comp = self.x_id assert self.helper.lower == +100 assert self.helper.upper", "assert self.helper.upper == 3 self.state.comp = self.x_id assert self.helper.lower ==", "improved self.helper._default_n_bin = 4 self.helper._max_n_bin = 3 self.state.comp = self.data.id['x']", "assert self.helper.upper == +100 def test_change_percentile(self): # Changing scale mode", "self.state.x_max = 7 self.state.n_bin = 8 self.state.comp = self.data.id['y'] self.state.x_min", "assert state2.a == np.datetime64(100, 'D') def test_nan_inf_minmax(): data = Data(x=[3,", "state = SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower',", "= self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_minmax(self):", "on a per-attribute basis. 
self.helper.percentile = 99.5 self.state.comp = self.y_id", "state.comp = data.id['y'] assert state.n_bin == 3 state.comp = data.id['z']", "12 state.common = True state.comp = data.id['x'] assert state.n_bin ==", "assert repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data", "99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile = 90 assert_allclose(self.helper.lower, -90)", "scale = CallbackProperty(100) self.state = SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp',", "self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1]", "that values are re-cached when edited manually self.helper.percentile = \"Custom\"", "assert self.helper.value == 42 class TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data", "== 1 assert helper.upper == 2 class DatetimeState(State): a =", "= data.id['x'] assert state.lower == -2 assert state.upper == +3", "state1.nested = [1, 3, sub_state] assert str(state1) == EXPECTED_STR.strip() assert", "state2.b == 'hello' assert state2.flat == [1, 3, 4] assert", "def test_minmax(self): assert self.helper.lower == -100 assert self.helper.upper == +100", "assert self.state.x_min == 1.5 assert self.state.x_max == 3.5 assert self.state.n_bin", "assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile", "= StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower', upper='upper', percentile='percentile', log='log') state.data", "= Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000), label='test_data') self.data_collection =", "state = SimpleState() state.comp = data.id['x'] state.lower = 2 state.upper", "state.comp = data.id['z'] assert state.n_bin == 11 state.common = False", "state.comp = data.id['z'] assert state.n_bin == 9 state.n_bin = 12", "== 99 def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower == +100 assert", "test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.lower == 2 assert self.helper.upper", "state class had a # percentile attribute but no log.", "last limits are retained self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper,", "10000), y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State): layer = CallbackProperty()", "assert state.n_bin == 11 def test_histogram_helper_common_n_bin_active(): # Make sure that", "-35. 
def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.value == 2.5", "assert self.helper.upper == +100 def test_change_attribute(self): self.helper.attribute = self.y_id assert", "with 3 elements> nested: <CallbackList with 3 elements> \"\"\" EXPECTED_REPR", "self.helper.value == 2.5 self.helper.attribute = self.x_id assert self.helper.value == -35", "Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data')", "3 self.state.comp = self.data.id['x'] assert self.state.x_min == 2 assert self.state.x_max", "= data state.comp = data.id['x'] state.n_bin = 9 state.comp =", "that values were re-cached when flipping self.state.comp = self.y_id assert", "data.id['x'] state.lower = 2 state.upper = 4 helper = StateAttributeLimitsHelper(state,", "state2.nested[2].flat == [1, 2] assert state2.nested[2].nested == [] EXPECTED_STR =", "self.y_id assert self.helper.lower == 2 assert self.helper.upper == 3 assert", "self.state.n_bin == 4 def test_hitting_limits(self): # FIXME: here we modify", "3, sub_state] assert str(state1) == EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip()", "with 3 elements> > \"\"\" def test_state_str_repr(): state1 = SimpleTestState()", "== 42 class TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data = Data(x=[-3.2,", "state.lower = 2 state.upper = 4 helper = StateAttributeLimitsHelper(state, attribute='comp',", "4 helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper', percentile='scale') state.scale =", "-99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99)", "= data.id['z'] assert state.n_bin == 15 state.n_bin = 12 state.common", "self.helper.upper == 3 self.state.comp = self.x_id assert self.helper.lower == +100", "n_bin='n_bin', common_n_bin='common') state.data = data state.comp = data.id['x'] state.n_bin =", "state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin == 11", "assert self.helper.value == 2.5 self.helper.attribute = self.x_id assert self.helper.value ==", "2] sub_state.nested = [] state1.nested = [1, 3, sub_state] state2", "state2.a == 2 assert state2.b == 'hello' assert state2.flat ==", "CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty()", "np.nan], label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty()", "== 3 self.state.comp = self.x_id assert self.helper.lower == +100 assert", "assert self.helper.percentile == 99 def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower ==", "self.helper.percentile == 100 self.helper.percentile = 99 self.state.comp = self.x_id assert", "= \"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): # Make", "== 2 assert self.helper.upper == 3 self.helper.attribute = self.x_id assert", "self.helper.lower == 2 assert self.helper.upper == 3 assert not self.helper.log", "state helper, but this could be improved self.helper._default_n_bin = 4", "don't want to # override the existing values. 
data =", "comp = CallbackProperty() val = CallbackProperty() self.state = SimpleState() self.helper", "self.state.x_min = 1.5 self.state.x_max = 3.5 self.state.n_bin = 3 self.state.comp", "self.helper.upper == 3 self.helper.attribute = self.x_id assert self.helper.lower == -100", "-100 assert self.helper.upper == +100 def test_change_percentile(self): # Changing scale", "3, sub_state] state2 = clone(state1) assert state2.a == 2 assert", "'a'], label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty()", "class had a # percentile attribute but no log. data", "= CallbackProperty() upper = CallbackProperty() percentile = CallbackProperty() log =", "percentile='percentile', log='log') state.data = data state.comp = data.id['x'] assert state.lower", "= 4 self.helper._max_n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min", "caused a crash if the state class had a #", "99.5 self.state.comp = self.y_id assert self.helper.percentile == 99 def test_flip_button(self):", "label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower", "== -100 # Make sure that values were re-cached when", "= CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty(True) state =", "when flipping self.state.comp = self.y_id assert self.helper.lower == 2 assert", "= StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data = self.data def", "# percentile attribute but no log. data = Data(x=np.linspace(-100, 100,", "2.5 self.helper.attribute = self.x_id assert self.helper.value == -35 def test_manual_edit(self):", "= 4 helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper', percentile='scale') state.scale", "def test_change_percentile(self): # Changing scale mode updates the limits self.helper.percentile", "-90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): # Make sure that if", "+100 def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.lower == 2", "noqa lower='lower', upper='upper', percentile='percentile', log='log') state.data = data state.comp =", "== 'blah' assert state2.nested[2].flat == [1, 2] assert state2.nested[2].nested ==", "<CallbackList with 3 elements> > \"\"\" def test_state_str_repr(): state1 =", "== -3.2 assert self.state.x_max == 7.2 assert self.state.n_bin == 4", "= self.data.main_components[1] def test_minmax(self): assert self.helper.lower == -100 assert self.helper.upper", "== -122 assert self.helper.upper == 234 assert self.helper.log self.state.comp =", "# Make sure that if we change scale and change", "= 42. 
assert self.helper.value == 42 self.state.comp = self.y_id assert", "state.data = data state.comp = data.id['x'] state.n_bin = 9 state.comp", "data.id['y'] assert state.n_bin == 3 state.comp = data.id['z'] assert state.n_bin", "== 9 state.n_bin = 12 state.common = True state.comp =", "= [1, 3, sub_state] state2 = clone(state1) assert state2.a ==", "self.state.comp = self.y_id assert self.helper.lower == 2 assert self.helper.upper ==", "CallbackProperty() state = SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa", ".test_state import clone from ..state_objects import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper)", "True assert state.lower == +1 assert state.upper == +3 state.log", "-100 assert self.helper.upper == +100 def test_change_attribute(self): self.helper.attribute = self.y_id", "= SimpleTestState() sub_state.a = 3 sub_state.b = 'blah' sub_state.flat =", "CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty()", "lower = CallbackProperty() upper = CallbackProperty() log = CallbackProperty(False) scale", "self.helper.upper == +100 def test_change_percentile(self): # Changing scale mode updates", "x_max = CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty(True) state", "= data.id['x'] state.lower = 2 state.upper = 4 helper =", "re-cached when flipping self.state.comp = self.y_id assert self.helper.lower == 2", "'blah' assert state2.nested[2].flat == [1, 2] assert state2.nested[2].nested == []", "assert state.lower == +1 assert state.upper == +3 state.log =", "lower='lower', upper='upper', percentile='percentile', log='log') state.data = data state.comp = data.id['x']", "= SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale', log='log')", "def test_limits_helper_initial_values(): # Regression test for a bug that occurred", "def test_percentile_no_log(): # Regression test for a bug that caused", "assert self.helper.upper == -100 def test_manual_edit(self): # Make sure that", "common = CallbackProperty(True) state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp',", "echo import CallbackProperty, ListCallbackProperty from glue.core import Data, DataCollection from", "self.state.x_min = 2 self.state.x_max = 7 self.state.n_bin = 8 self.state.comp", "n_bin='n_bin') self.state.data = self.data def test_default_numerical(self): self.state.comp = self.data.id['x'] assert", "-2, np.inf, np.nan], label='test_data') class SimpleState(State): layer = CallbackProperty() comp", "a # percentile attribute but no log. 
data = Data(x=np.linspace(-100,", "sure that values were re-cached when flipping self.state.comp = self.y_id", "assert state.n_bin == 11 state.common = False state.n_bin = 13", "= 3 sub_state.b = 'blah' sub_state.flat = [1, 2] sub_state.nested", "'d', 'e', 'f', 'f', 'a'], label='test_data') self.data_collection = DataCollection([self.data]) class", "4 self.helper._max_n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min ==", "3.5 assert self.state.n_bin == 3 def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2,", "that common_n_bin works as expected if True from start data", "2] assert state2.nested[2].nested == [] EXPECTED_STR = \"\"\" a: 2", "= StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale', log='log') self.state.data = self.data", "assert self.helper.log self.state.comp = self.y_id assert self.helper.lower == 2 assert", "be improved self.helper._default_n_bin = 4 self.helper._max_n_bin = 3 self.state.comp =", "self.helper.log self.state.comp = self.y_id assert self.helper.lower == 2 assert self.helper.upper", "upper='x_max', n_bin='n_bin') self.state.data = self.data def test_default_numerical(self): self.state.comp = self.data.id['x']", "if True from start data = Data(x=[-3.2, 4.3, 2.2], y=['a',", "test_hitting_limits(self): # FIXME: here we modify the internal defaults rather", "assert self.helper.upper == 234 assert self.helper.log self.state.comp = self.y_id assert", "self.x_id assert self.helper.value == -35 def test_manual_edit(self): self.state.val = 42.", "= CallbackProperty() scale = CallbackProperty() state = SimpleState() state.comp =", "a bug that occurred if the limits cache was empty", "CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty(True) state = SimpleState()", "# override the existing values. 
data = Data(x=np.linspace(-100, 100, 10000),", "= CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() log =", "the state class had a # percentile attribute but no", "state = SimpleState() state.lower = 1 state.upper = 2 state.comp", "attribute='comp', lower='lower', upper='upper') assert helper.lower == 1 assert helper.upper ==", "state1.b = 'hello' state1.flat = [1, 3, 4] sub_state =", "assert self.helper.upper == -100 # Make sure that values were", "<CallbackList with 3 elements> \"\"\" EXPECTED_REPR = \"\"\" <SimpleTestState a:", "CallbackProperty(True) state = SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max',", "state.lower == -2 assert state.upper == +3 state.log = True", "data.id['z'] assert state.n_bin == 15 state.n_bin = 12 state.common =", "def test_manual_edit(self): # Make sure that values are re-cached when", "== 7 assert self.state.n_bin == 8 self.state.comp = self.data.id['y'] assert", "def test_histogram_helper_common_n_bin_active(): # Make sure that common_n_bin works as expected", "= CallbackProperty() percentile = CallbackProperty() log = CallbackProperty() state =", "99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log(): # Regression test", "assert state2.a == 2 assert state2.b == 'hello' assert state2.flat", "= data.id['z'] assert state.n_bin == 9 state.n_bin = 12 state.common", "+100 assert self.helper.upper == -100 def test_manual_edit(self): # Make sure", "1.2], label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty()", "CallbackProperty() upper = CallbackProperty() percentile = CallbackProperty() log = CallbackProperty()", "== 3 def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3, 2.2], y=['a',", "self.state.comp = self.y_id assert self.helper.percentile == 100 self.helper.percentile = 99", "7 self.state.n_bin = 8 self.state.comp = self.data.id['y'] self.state.x_min = 1.5", "= np.datetime64(100, 'D') state2 = clone(state1) assert state2.a == np.datetime64(100,", "= CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() percentile =", "99.5 self.state.comp = self.y_id assert self.helper.percentile == 100 self.helper.percentile =", "10000), y=np.linspace(2, 3, 10000), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State):", "to custom, the last limits are retained self.helper.percentile = \"Custom\"", "= Data(x=np.linspace(-100, 30, 9999), y=np.linspace(2, 3, 9999), label='test_data') self.data_collection =", "7.2, -1.1, 2.3], y=['a', 'f', 'd', 'e', 'f', 'f', 'a'],", "2 class DatetimeState(State): a = CallbackProperty() def test_state_serialization_datetime64(): state1 =", "= 3 self.state.comp = self.data.id['x'] assert self.state.x_min == 2 assert", "TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2,", "CallbackProperty(False) scale = CallbackProperty(100) self.state = SimpleState() self.helper = StateAttributeLimitsHelper(self.state,", "StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data = self.data self.state.comp = self.data.id['x']", "self.data.id['x'] self.state.x_min = 2 self.state.x_max = 7 self.state.n_bin = 8", "= [1, 2] sub_state.nested = [] state1.nested = [1, 3,", "common_n_bin works as expected if True from start data =", "3 elements> nested: <CallbackList with 3 elements> 
\"\"\" EXPECTED_REPR =", "4 self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max", "'hello' state1.flat = [1, 3, 4] sub_state = SimpleTestState() state1.nested", "self.data.id['y'] self.state.x_min = 1.5 self.state.x_max = 3.5 self.state.n_bin = 3", "self.y_id assert self.helper.percentile == 100 self.helper.percentile = 99 self.state.comp =", "= ListCallbackProperty() def test_state_serialization(): state1 = SimpleTestState() state1.a = 2", "SimpleState() state.comp = data.id['x'] state.lower = 2 state.upper = 4", "def setup_method(self, method): self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2,", "= 2 state.comp = data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower',", "+90) def test_percentile_cached(self): # Make sure that if we change", "layer = CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper", "= CallbackProperty() comp = CallbackProperty() val = CallbackProperty() self.state =", "== -0.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 3", "test_state_serialization_datetime64(): state1 = DatetimeState() state1.a = np.datetime64(100, 'D') state2 =", "data state.comp = data.id['x'] state.n_bin = 9 state.comp = data.id['y']", "self.data.main_components[0] self.y_id = self.data.main_components[1] def test_minmax(self): assert self.helper.lower == -100", "= CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() common =", "assert state2.nested[2].a == 3 assert state2.nested[2].b == 'blah' assert state2.nested[2].flat", "CallbackProperty() self.state = SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val')", "= 3 self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert", "10000), label='test_data') class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty()", "= 'blah' sub_state.flat = [1, 2] sub_state.nested = [] state1.nested", "- in this case we don't want to # override", "= StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert helper.lower == 1 assert", "+3 state.log = True assert state.lower == +1 assert state.upper", "CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() log = CallbackProperty(False)", "if we change scale and change attribute, the scale #", "SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data = self.data", "mode updates the limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper,", "= 'hello' state1.flat = [1, 3, 4] sub_state = SimpleTestState()", "custom, the last limits are retained self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower,", "3.5 assert self.state.n_bin == 3 def test_caching(self): self.state.comp = self.data.id['x']", "test for a bug that occurred if the limits cache", "9999), label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty()", "== 2.5 self.state.comp = self.x_id assert self.helper.value == 42 class", "= CallbackProperty() state = SimpleState() state.lower = 1 state.upper =", "that if we change scale and change attribute, the scale", "= SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data =", "n_bin = CallbackProperty() common = 
CallbackProperty(True) state = SimpleState() helper", "setup_method(self, method): self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1,", "state.comp = data.id['z'] assert state.n_bin == 15 state.n_bin = 12", "2 state.upper = 4 helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper',", "self.state.comp = self.data.id['y'] assert self.state.x_min == 1.5 assert self.state.x_max ==", "self.state.comp = self.data.id['x'] self.state.x_min = 2 self.state.x_max = 7 self.state.n_bin", "self.state.n_bin = 3 self.state.comp = self.data.id['x'] assert self.state.x_min == 2", "test_flip_button(self): self.helper.flip_limits() assert self.helper.lower == +100 assert self.helper.upper == -100", "comp = CallbackProperty() x_min = CallbackProperty() x_max = CallbackProperty() n_bin", "assert self.state.n_bin == 3 def test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min", "8 self.state.comp = self.data.id['y'] assert self.state.x_min == 1.5 assert self.state.x_max", "state.lower == +1 assert state.upper == +3 state.log = False", "if the state class had a # percentile attribute but", "assert state2.nested[2].flat == [1, 2] assert state2.nested[2].nested == [] EXPECTED_STR", "some attributes were set to values - in this case", "= 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) # When switching to", "self.data.main_components[1] def test_value(self): assert self.helper.value == -35. def test_change_attribute(self): self.helper.attribute", "self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99", "11 state.common = False state.n_bin = 13 state.comp = data.id['x']", "Regression test for a bug that occurred if the limits", "= CallbackProperty() comp = CallbackProperty() lower = CallbackProperty() upper =", "self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): #", "self.state.comp = self.x_id assert self.helper.value == 42 class TestStateAttributeHistogramHelper(): def", "test_manual_edit(self): # Make sure that values are re-cached when edited", "= self.data.id['x'] assert self.state.x_min == 2 assert self.state.x_max == 7", "= Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2],", "assert state.n_bin == 9 state.n_bin = 12 state.common = True", "data.id['x'] helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert helper.lower ==", "elements> nested: <CallbackList with 3 elements> > \"\"\" def test_state_str_repr():", "= CallbackProperty() upper = CallbackProperty() scale = CallbackProperty() state =", "test_histogram_helper_common_n_bin_active(): # Make sure that common_n_bin works as expected if", "were re-cached when flipping self.state.comp = self.y_id assert self.helper.lower ==", "= False state.percentile = 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def", "self.state.upper = 234 self.helper.log = True assert self.helper.lower == -122", "to # override the existing values. 
data = Data(x=np.linspace(-100, 100,", "self.data.id['y'] assert self.state.x_min == -0.5 assert self.state.x_max == 3.5 assert", "upper='upper', percentile='percentile', log='log') state.data = data state.comp = data.id['x'] assert", "= CallbackProperty() self.state = SimpleState() self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian,", "CallbackProperty() upper = CallbackProperty() scale = CallbackProperty() state = SimpleState()", "attributes were set to values - in this case we", "self.state.x_max == 7.2 assert self.state.n_bin == 15 def test_default_categorical(self): self.state.comp", "self.helper.attribute = self.y_id assert self.helper.value == 2.5 self.helper.attribute = self.x_id", "self.helper.percentile = 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) # When switching", "the scale # modes are cached on a per-attribute basis.", "StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data = self.data def test_default_numerical(self):", "assert self.helper.upper == 234 assert self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self,", "= data.id['x'] state.n_bin = 9 state.comp = data.id['z'] assert state.n_bin", "state.data = data state.comp = data.id['x'] assert state.lower == -2", "self.helper.attribute = self.y_id assert self.helper.lower == 2 assert self.helper.upper ==", "y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data') self.data_collection =", "helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper', percentile='scale') state.scale = 90", "CallbackProperty() b = CallbackProperty() flat = ListCallbackProperty() nested = ListCallbackProperty()", "self.state.n_bin == 15 def test_default_categorical(self): self.state.comp = self.data.id['y'] assert self.state.x_min", "= data.id['x'] state.n_bin = 9 state.comp = data.id['y'] assert state.n_bin", "percentile = CallbackProperty() log = CallbackProperty() state = SimpleState() helper", "+99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower, -99) assert_allclose(self.helper.upper, +99) self.helper.percentile =", "values were re-cached when flipping self.state.comp = self.y_id assert self.helper.lower", "def test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min = 2 self.state.x_max =", "== 2 assert state2.b == 'hello' assert state2.flat == [1,", "x_max = CallbackProperty() n_bin = CallbackProperty() self.state = SimpleState() self.helper", "ListCallbackProperty from glue.core import Data, DataCollection from .test_state import clone", "StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale', log='log') self.state.data = self.data self.state.comp", "self.state.data = self.data def test_default_numerical(self): self.state.comp = self.data.id['x'] assert self.state.x_min", "basis. 
self.helper.percentile = 99.5 self.state.comp = self.y_id assert self.helper.percentile ==", "2 assert self.state.x_max == 7 assert self.state.n_bin == 8 self.state.comp", "9 state.comp = data.id['y'] assert state.n_bin == 3 state.comp =", "scale mode updates the limits self.helper.percentile = 99.5 assert_allclose(self.helper.lower, -99.5)", "assert self.state.n_bin == 4 self.state.comp = self.data.id['y'] assert self.state.x_min ==", "this could be improved self.helper._default_n_bin = 4 self.helper._max_n_bin = 3", "sub_state.flat = [1, 2] sub_state.nested = [] state1.nested = [1,", "2 assert state2.b == 'hello' assert state2.flat == [1, 3,", "-100 def test_manual_edit(self): # Make sure that values are re-cached", "self.helper.log = True assert self.helper.lower == -122 assert self.helper.upper ==", "SimpleState() helper = StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data", "that caused a crash if the state class had a", "assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log(): # Regression test for", "3 elements> nested: <CallbackList with 3 elements> > \"\"\" def", "StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a = CallbackProperty() b = CallbackProperty()", "manually self.helper.percentile = \"Custom\" self.state.lower = -122 self.state.upper = 234", "self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_value(self): assert", "== 2 assert self.helper.upper == 3 assert not self.helper.log self.state.comp", "== 42 self.state.comp = self.y_id assert self.helper.value == 2.5 self.state.comp", "4 def test_hitting_limits(self): # FIXME: here we modify the internal", "== [1, 2] assert state2.nested[2].nested == [] EXPECTED_STR = \"\"\"", "= 1.5 self.state.x_max = 3.5 self.state.n_bin = 3 self.state.comp =", "= self.data.main_components[1] def test_value(self): assert self.helper.value == -35. def test_change_attribute(self):", "3, 4] sub_state = SimpleTestState() sub_state.a = 3 sub_state.b =", "== 3 state.comp = data.id['z'] assert state.n_bin == 15 state.n_bin", "12 state.n_bin = 11 state.comp = data.id['y'] assert state.n_bin ==", "state2 = clone(state1) assert state2.a == 2 assert state2.b ==", "existing values. data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2, 3, 10000),", "= CallbackProperty() def test_state_serialization_datetime64(): state1 = DatetimeState() state1.a = np.datetime64(100,", "self.state.x_max == 3.5 assert self.state.n_bin == 4 def test_hitting_limits(self): #", "test_manual_edit(self): self.state.val = 42. 
assert self.helper.value == 42 self.state.comp =", "-100 # Make sure that values were re-cached when flipping", "state1 = SimpleTestState() state1.a = 2 state1.b = 'hello' state1.flat", "2.3, 1.2], label='test_data') class SimpleState(State): layer = CallbackProperty() comp =", "= clone(state1) assert state2.a == np.datetime64(100, 'D') def test_nan_inf_minmax(): data", "= 2 state.upper = 4 helper = StateAttributeLimitsHelper(state, attribute='comp', lower='lower',", "def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.lower == 2 assert", "bug that occurred if the limits cache was empty #", "test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min = 2 self.state.x_max = 7", "2.2], y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State):", "self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp', function=np.nanmedian, value='val') self.state.data = self.data self.state.comp", "def test_default_categorical(self): self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert", "self.state.n_bin == 8 self.state.comp = self.data.id['y'] assert self.state.x_min == 1.5", "we don't want to # override the existing values. data", "CallbackProperty, ListCallbackProperty from glue.core import Data, DataCollection from .test_state import", "SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() val = CallbackProperty()", "== 11 state.common = False state.n_bin = 13 state.comp =", "upper='upper', percentile='scale', log='log') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id", "EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 100,", "CallbackProperty() upper = CallbackProperty() log = CallbackProperty(False) scale = CallbackProperty(100)", "'D') state2 = clone(state1) assert state2.a == np.datetime64(100, 'D') def", "n_bin = CallbackProperty() self.state = SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp',", "class TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data = Data(x=[-3.2, 4.3, 2.2,", "== 4 def test_hitting_limits(self): # FIXME: here we modify the", "False state.percentile = 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log():", "self.state.lower = -122 self.state.upper = 234 self.helper.log = True assert", "state1.flat = [1, 3, 4] sub_state = SimpleTestState() state1.nested =", "'d'], z=[1.1, 2.3, 1.2], label='test_data') class SimpleState(State): layer = CallbackProperty()", "log='log') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0]", "3, 4] sub_state = SimpleTestState() state1.nested = [1, 3, sub_state]", "self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id", "StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower', upper='upper', percentile='percentile', log='log') state.data =", "= CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() scale =", "class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() lower =", "StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data state.comp", "-0.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 4 def", "self.data.id['y'] assert 
self.state.x_min == 1.5 assert self.state.x_max == 3.5 assert", "Make sure that values are re-cached when edited manually self.helper.percentile", "sure that values are re-cached when edited manually self.helper.percentile =", "= \"\"\" a: 2 b: hello flat: <CallbackList with 3", "are retained self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def", "assert helper.lower == 1 assert helper.upper == 2 class DatetimeState(State):", "assert self.helper.lower == 2 assert self.helper.upper == 3 self.state.comp =", "test for a bug that caused a crash if the", "== -100 def test_manual_edit(self): # Make sure that values are", "self.state.val = 42. assert self.helper.value == 42 self.state.comp = self.y_id", "b: hello flat: <CallbackList with 3 elements> nested: <CallbackList with", "scale and change attribute, the scale # modes are cached", "class TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 100, 10000),", "+100 def test_change_percentile(self): # Changing scale mode updates the limits", "= self.x_id assert self.helper.value == 42 class TestStateAttributeHistogramHelper(): def setup_method(self,", "= data.id['y'] assert state.n_bin == 3 state.comp = data.id['z'] assert", "== EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100,", "data.id['x'] assert state.n_bin == 12 state.n_bin = 11 state.comp =", "helper.upper == 2 class DatetimeState(State): a = CallbackProperty() def test_state_serialization_datetime64():", "+1 assert state.upper == +3 state.log = False state.percentile =", "bug that caused a crash if the state class had", "CallbackProperty() common = CallbackProperty(True) state = SimpleState() helper = StateAttributeHistogramHelper(state,", "self.helper.value == 42 self.state.comp = self.y_id assert self.helper.value == 2.5", "the limits cache was empty # but some attributes were", "= Data(x=[3, 1, -2, np.inf, np.nan], label='test_data') class SimpleState(State): layer", "<CallbackList with 3 elements> nested: <CallbackList with 3 elements> \"\"\"", "Data(x=[3, 1, -2, np.inf, np.nan], label='test_data') class SimpleState(State): layer =", "CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() percentile = CallbackProperty()", "== +100 def test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.lower ==", "self.x_id assert self.helper.percentile == 99.5 self.state.comp = self.y_id assert self.helper.percentile", "a = CallbackProperty() b = CallbackProperty() flat = ListCallbackProperty() nested", "the internal defaults rather than making a new # state", "StateAttributeLimitsHelper(state, attribute='comp', lower='lower', upper='upper') assert helper.lower == 1 assert helper.upper", "self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data = self.data", "self.y_id assert self.helper.percentile == 99 def test_flip_button(self): self.helper.flip_limits() assert self.helper.lower", "= SimpleState() helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa lower='lower', upper='upper',", "SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale', log='log') self.state.data", "= CallbackProperty() common = CallbackProperty(True) state = SimpleState() helper =", "self.x_id assert self.helper.lower 
== -100 assert self.helper.upper == +100 def", "\"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): # Make sure", "= SimpleTestState() state1.a = 2 state1.b = 'hello' state1.flat =", "def test_value(self): assert self.helper.value == -35. def test_change_attribute(self): self.helper.attribute =", "self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3], y=['a',", "self.state.x_max == 7.2 assert self.state.n_bin == 4 self.state.comp = self.data.id['y']", "<SimpleTestState a: 2 b: hello flat: <CallbackList with 3 elements>", "SimpleTestState(State): a = CallbackProperty() b = CallbackProperty() flat = ListCallbackProperty()", "= 8 self.state.comp = self.data.id['y'] self.state.x_min = 1.5 self.state.x_max =", "DatetimeState() state1.a = np.datetime64(100, 'D') state2 = clone(state1) assert state2.a", "numpy as np from numpy.testing import assert_allclose from echo import", "state.percentile = 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98) def test_percentile_no_log(): #", "-122 assert self.helper.upper == 234 assert self.helper.log class TestStateAttributeSingleValueHelper(): def", "test_change_attribute(self): self.helper.attribute = self.y_id assert self.helper.value == 2.5 self.helper.attribute =", "== -3.2 assert self.state.x_max == 7.2 assert self.state.n_bin == 15", "self.state.comp = self.x_id assert self.helper.percentile == 99.5 self.state.comp = self.y_id", "import CallbackProperty, ListCallbackProperty from glue.core import Data, DataCollection from .test_state", "assert self.state.x_max == 3.5 assert self.state.n_bin == 3 def test_caching(self):", "attribute='comp', # noqa lower='lower', upper='upper', percentile='percentile', log='log') state.data = data", "[1, 2] sub_state.nested = [] state1.nested = [1, 3, sub_state]", "def test_state_serialization_datetime64(): state1 = DatetimeState() state1.a = np.datetime64(100, 'D') state2", "# When switching to custom, the last limits are retained", "<CallbackList with 3 elements> nested: <CallbackList with 3 elements> >", "self.helper.attribute = self.x_id assert self.helper.value == -35 def test_manual_edit(self): self.state.val", "assert helper.upper == 2 class DatetimeState(State): a = CallbackProperty() def", "self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 30,", "self.y_id assert self.helper.value == 2.5 self.helper.attribute = self.x_id assert self.helper.value", "state1 = DatetimeState() state1.a = np.datetime64(100, 'D') state2 = clone(state1)", "assert state2.nested[2].nested == [] EXPECTED_STR = \"\"\" a: 2 b:", "sub_state] assert str(state1) == EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip() class", "state.n_bin == 9 state.n_bin = 12 state.common = True state.comp", "= -122 self.state.upper = 234 self.helper.log = True assert self.helper.lower", "assert state.upper == +3 state.log = False state.percentile = 99", "5.4, 7.2, -1.1, 2.3], y=['a', 'f', 'd', 'e', 'f', 'f',", "nested: <CallbackList with 3 elements> \"\"\" EXPECTED_REPR = \"\"\" <SimpleTestState", "self.state.n_bin == 3 def test_caching(self): self.state.comp = self.data.id['x'] self.state.x_min =", "state.common = True state.comp = data.id['x'] assert state.n_bin == 12", "= \"\"\" <SimpleTestState a: 2 b: hello flat: <CallbackList with", "= self.data.id['x'] self.state.x_min = 2 self.state.x_max = 7 self.state.n_bin =", "class DatetimeState(State): a = 
CallbackProperty() def test_state_serialization_datetime64(): state1 = DatetimeState()", "def test_manual_edit(self): self.state.val = 42. assert self.helper.value == 42 self.state.comp", "state.common = False state.n_bin = 13 state.comp = data.id['x'] assert", "data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data') class SimpleState(State):", "assert state2.b == 'hello' assert state2.flat == [1, 3, 4]", "== 7.2 assert self.state.n_bin == 4 self.state.comp = self.data.id['y'] assert", "are cached on a per-attribute basis. self.helper.percentile = 99.5 self.state.comp", "'f', 'a'], label='test_data') self.data_collection = DataCollection([self.data]) class SimpleState(State): layer =", "self.helper.value == 42 class TestStateAttributeHistogramHelper(): def setup_method(self, method): self.data =", "assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) def test_percentile_cached(self): # Make sure that", "assert self.state.n_bin == 15 def test_default_categorical(self): self.state.comp = self.data.id['y'] assert", "= data.id['x'] assert state.n_bin == 11 def test_limits_helper_initial_values(): # Regression", "self.state.comp = self.y_id assert self.helper.value == 2.5 self.state.comp = self.x_id", "DatetimeState(State): a = CallbackProperty() def test_state_serialization_datetime64(): state1 = DatetimeState() state1.a", "state.log = False state.percentile = 99 assert_allclose(state.lower, -1.97) assert_allclose(state.upper, +2.98)", "-3.2 assert self.state.x_max == 7.2 assert self.state.n_bin == 15 def", "assert state2.nested[0:2] == [1, 3] assert state2.nested[2].a == 3 assert", "assert self.helper.lower == -122 assert self.helper.upper == 234 assert self.helper.log", "SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data =", "== [1, 3, 4] assert state2.nested[0:2] == [1, 3] assert", "lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data state.comp = data.id['x']", "scale # modes are cached on a per-attribute basis. 
self.helper.percentile", "sure that common_n_bin works as expected if True from start", "making a new # state helper, but this could be", "a new # state helper, but this could be improved", "2 self.state.x_max = 7 self.state.n_bin = 8 self.state.comp = self.data.id['y']", "data.id['x'] assert state.n_bin == 11 def test_limits_helper_initial_values(): # Regression test", "= 234 self.helper.log = True assert self.helper.lower == -122 assert", "y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State): layer = CallbackProperty() comp", "4] sub_state = SimpleTestState() state1.nested = [1, 3, sub_state] assert", "self.state.n_bin == 4 self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5", "Make sure that common_n_bin works as expected if True from", "SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min = CallbackProperty()", "import (State, StateAttributeLimitsHelper, StateAttributeSingleValueHelper, StateAttributeHistogramHelper) class SimpleTestState(State): a = CallbackProperty()", "state.n_bin = 9 state.comp = data.id['z'] assert state.n_bin == 9", "values are re-cached when edited manually self.helper.percentile = \"Custom\" self.state.lower", "a = CallbackProperty() def test_state_serialization_datetime64(): state1 = DatetimeState() state1.a =", "self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_value(self): assert self.helper.value", "def test_hitting_limits(self): # FIXME: here we modify the internal defaults", "state.n_bin == 15 state.n_bin = 12 state.common = True state.comp", "start data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f', 'd'], z=[1.1,", "assert self.state.x_min == 2 assert self.state.x_max == 7 assert self.state.n_bin", "CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty()", "assert self.state.x_max == 3.5 assert self.state.n_bin == 4 def test_hitting_limits(self):", "assert state.n_bin == 12 state.n_bin = 11 state.comp = data.id['y']", "self.state = SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper', percentile='scale',", "== 4 self.state.comp = self.data.id['y'] assert self.state.x_min == -0.5 assert", "assert self.state.x_max == 3.5 assert self.state.n_bin == 3 def test_histogram_helper_common_n_bin():", "assert self.helper.value == 2.5 self.state.comp = self.x_id assert self.helper.value ==", "3 def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3, 2.2], y=['a', 'f',", "test_nan_inf_minmax(): data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data') class", "attribute but no log. 
data = Data(x=np.linspace(-100, 100, 10000), y=np.linspace(2,", "assert state.upper == +3 state.log = True assert state.lower ==", "state1.a = 2 state1.b = 'hello' state1.flat = [1, 3,", "= data.id['x'] assert state.n_bin == 11 def test_histogram_helper_common_n_bin_active(): # Make", "test_change_percentile(self): # Changing scale mode updates the limits self.helper.percentile =", "DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() val", "3.5 assert self.state.n_bin == 4 def test_hitting_limits(self): # FIXME: here", "internal defaults rather than making a new # state helper,", "self.state.x_max = 3.5 self.state.n_bin = 3 self.state.comp = self.data.id['x'] assert", "state.n_bin == 11 state.common = False state.n_bin = 13 state.comp", "= self.data.id['x'] self.x_id = self.data.main_components[0] self.y_id = self.data.main_components[1] def test_value(self):", "= 9 state.comp = data.id['z'] assert state.n_bin == 9 state.n_bin", "state2.nested[2].b == 'blah' assert state2.nested[2].flat == [1, 2] assert state2.nested[2].nested", "== 'hello' assert state2.flat == [1, 3, 4] assert state2.nested[0:2]", "CallbackProperty() upper = CallbackProperty() state = SimpleState() state.lower = 1", "glue.core import Data, DataCollection from .test_state import clone from ..state_objects", "-1.1, 2.3], y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data')", "CallbackProperty() n_bin = CallbackProperty() self.state = SimpleState() self.helper = StateAttributeHistogramHelper(self.state,", "CallbackProperty(100) self.state = SimpleState() self.helper = StateAttributeLimitsHelper(self.state, attribute='comp', lower='lower', upper='upper',", "EXPECTED_STR.strip() assert repr(state1) == EXPECTED_REPR.strip() class TestStateAttributeLimitsHelper(): def setup_method(self, method):", "3 assert not self.helper.log self.state.comp = self.x_id assert self.helper.lower ==", "13 state.comp = data.id['x'] assert state.n_bin == 11 def test_histogram_helper_common_n_bin_active():", "100, 10000), y=np.linspace(2, 3, 10000), label='test_data') class SimpleState(State): layer =", "assert self.state.x_min == -3.2 assert self.state.x_max == 7.2 assert self.state.n_bin", "assert_allclose(state.upper, +2.98) def test_percentile_no_log(): # Regression test for a bug", "sub_state.b = 'blah' sub_state.flat = [1, 2] sub_state.nested = []", "[1, 3, 4] sub_state = SimpleTestState() state1.nested = [1, 3,", "assert state2.flat == [1, 3, 4] assert state2.nested[0:2] == [1,", "percentile='scale', log='log') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id =", "def test_state_serialization(): state1 = SimpleTestState() state1.a = 2 state1.b =", "test_default_numerical(self): self.state.comp = self.data.id['x'] assert self.state.x_min == -3.2 assert self.state.x_max", "1.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 3 def", "EXPECTED_REPR = \"\"\" <SimpleTestState a: 2 b: hello flat: <CallbackList", "4.3, 2.2], y=['a', 'f', 'd'], z=[1.1, 2.3, 1.2], label='test_data') class", "4] assert state2.nested[0:2] == [1, 3] assert state2.nested[2].a == 3", "CallbackProperty() x_max = CallbackProperty() n_bin = CallbackProperty() common = CallbackProperty(True)", "self.helper.upper == 3 assert not self.helper.log self.state.comp = self.x_id assert", "[1, 2] assert state2.nested[2].nested == [] EXPECTED_STR = \"\"\" a:", "self.state.comp = self.y_id assert self.helper.percentile == 99 def test_flip_button(self): self.helper.flip_limits()", 
"self.helper.percentile = 99 self.state.comp = self.x_id assert self.helper.percentile == 99.5", "class TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data = Data(x=np.linspace(-100, 30, 9999),", "state2.nested[2].nested == [] EXPECTED_STR = \"\"\" a: 2 b: hello", "234 assert self.helper.log self.state.comp = self.y_id assert self.helper.lower == 2", "234 assert self.helper.log class TestStateAttributeSingleValueHelper(): def setup_method(self, method): self.data =", "SimpleTestState() state1.nested = [1, 3, sub_state] assert str(state1) == EXPECTED_STR.strip()", "# Regression test for a bug that caused a crash", "a per-attribute basis. self.helper.percentile = 99.5 self.state.comp = self.y_id assert", "= self.data.id['y'] self.state.x_min = 1.5 self.state.x_max = 3.5 self.state.n_bin =", "9 state.comp = data.id['z'] assert state.n_bin == 9 state.n_bin =", "limits are retained self.helper.percentile = \"Custom\" assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90)", "with 3 elements> nested: <CallbackList with 3 elements> > \"\"\"", "lower='lower', upper='upper', percentile='scale', log='log') self.state.data = self.data self.state.comp = self.data.id['x']", "= SimpleState() self.helper = StateAttributeHistogramHelper(self.state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin') self.state.data", "works as expected if True from start data = Data(x=[-3.2,", "data state.comp = data.id['x'] state.n_bin = 9 state.comp = data.id['z']", "= 1 state.upper = 2 state.comp = data.id['x'] helper =", "# noqa lower='lower', upper='upper', percentile='percentile', log='log') state.data = data state.comp", "comp = CallbackProperty() lower = CallbackProperty() upper = CallbackProperty() scale", "CallbackProperty() state = SimpleState() state.comp = data.id['x'] state.lower = 2", "Regression test for a bug that caused a crash if", "elements> nested: <CallbackList with 3 elements> \"\"\" EXPECTED_REPR = \"\"\"", "= 99.5 assert_allclose(self.helper.lower, -99.5) assert_allclose(self.helper.upper, +99.5) self.helper.percentile = 99 assert_allclose(self.helper.lower,", "assert self.helper.percentile == 100 self.helper.percentile = 99 self.state.comp = self.x_id", "assert_allclose(self.helper.upper, +99) self.helper.percentile = 90 assert_allclose(self.helper.lower, -90) assert_allclose(self.helper.upper, +90) #", "== 3 state.comp = data.id['z'] assert state.n_bin == 11 state.common", "nested = ListCallbackProperty() def test_state_serialization(): state1 = SimpleTestState() state1.a =", "= self.x_id assert self.helper.value == -35 def test_manual_edit(self): self.state.val =", "234 self.helper.log = True assert self.helper.lower == -122 assert self.helper.upper", "== [1, 3] assert state2.nested[2].a == 3 assert state2.nested[2].b ==", "assert self.state.n_bin == 3 def test_histogram_helper_common_n_bin(): data = Data(x=[-3.2, 4.3,", "lower='lower', upper='upper') assert helper.lower == 1 assert helper.upper == 2", "assert state.lower == -2 assert state.upper == +3 state.log =", "assert state.n_bin == 15 state.n_bin = 12 state.common = True", "= DataCollection([self.data]) class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty()", "attribute='comp', function=np.nanmedian, value='val') self.state.data = self.data self.state.comp = self.data.id['x'] self.x_id", "== 1.5 assert self.state.x_max == 3.5 assert self.state.n_bin == 3", "= self.data.main_components[0] self.y_id = self.data.main_components[1] def test_minmax(self): 
assert self.helper.lower ==", "assert self.helper.upper == 3 assert not self.helper.log self.state.comp = self.x_id", "with 3 elements> \"\"\" EXPECTED_REPR = \"\"\" <SimpleTestState a: 2", "== 99.5 self.state.comp = self.y_id assert self.helper.percentile == 99 def", "= StateAttributeHistogramHelper(state, attribute='comp', lower='x_min', upper='x_max', n_bin='n_bin', common_n_bin='common') state.data = data", "but this could be improved self.helper._default_n_bin = 4 self.helper._max_n_bin =", "state1.a = np.datetime64(100, 'D') state2 = clone(state1) assert state2.a ==", "+2.98) def test_percentile_no_log(): # Regression test for a bug that", "self.state.comp = self.x_id assert self.helper.lower == -122 assert self.helper.upper ==", "class SimpleState(State): layer = CallbackProperty() comp = CallbackProperty() x_min =", "== np.datetime64(100, 'D') def test_nan_inf_minmax(): data = Data(x=[3, 1, -2," ]
[ "<reponame>victormartinez/ecommerceapi from typing import Iterable, Optional class ProductsNotFound(Exception): def __init__(self,", "= product_ids or [] self.message = \"One or more products", "= None): self.product_ids = product_ids or [] self.message = \"One", "ProductsNotFound(Exception): def __init__(self, product_ids: Optional[Iterable[int]] = None): self.product_ids = product_ids", "[] self.message = \"One or more products are invalid.\" super().__init__(self.message)", "from typing import Iterable, Optional class ProductsNotFound(Exception): def __init__(self, product_ids:", "Iterable, Optional class ProductsNotFound(Exception): def __init__(self, product_ids: Optional[Iterable[int]] = None):", "typing import Iterable, Optional class ProductsNotFound(Exception): def __init__(self, product_ids: Optional[Iterable[int]]", "Optional class ProductsNotFound(Exception): def __init__(self, product_ids: Optional[Iterable[int]] = None): self.product_ids", "import Iterable, Optional class ProductsNotFound(Exception): def __init__(self, product_ids: Optional[Iterable[int]] =", "Optional[Iterable[int]] = None): self.product_ids = product_ids or [] self.message =", "def __init__(self, product_ids: Optional[Iterable[int]] = None): self.product_ids = product_ids or", "__init__(self, product_ids: Optional[Iterable[int]] = None): self.product_ids = product_ids or []", "product_ids: Optional[Iterable[int]] = None): self.product_ids = product_ids or [] self.message", "class ProductsNotFound(Exception): def __init__(self, product_ids: Optional[Iterable[int]] = None): self.product_ids =", "None): self.product_ids = product_ids or [] self.message = \"One or", "or [] self.message = \"One or more products are invalid.\"", "self.product_ids = product_ids or [] self.message = \"One or more", "product_ids or [] self.message = \"One or more products are" ]
[ "[\"Nigel\", \"The British Empire\"]) assert len(a_record) == 2 def test_record_repr(self):", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.keys(), (\"name\", \"age\",", "with self.assertRaises(IndexError): _ = r.values(1, 0, 999) def test_record_items(self): r", "hash(record2) != hash(record3) def test_record_iter(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\",", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"), 1)", "2.0 (the \"License\"); # you may not use this file", "[\"name\", \"empire\"] def test_record_copy(self): original = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\",", "# This file is part of Neo4j. # # Licensed", "= r.index(\"shoe size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with", "assert original.keys() == duplicate.keys() assert original is not duplicate def", "under the License. from unittest import TestCase from neo4j.v1 import", "British Empire\"]) assert len(a_record) == 2 def test_record_repr(self): a_record =", "\"married\"], [\"Alice\", 33, True]) self.assertEqual(r.values(), (\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",))", "test_record_items(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.items(),", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "self.assertEqual(r.items(1, 0), [(\"age\", 33), (\"name\", \"Alice\")]) with self.assertRaises(IndexError): _ =", "with self.assertRaises(IndexError): _ = r.index(3) with self.assertRaises(TypeError): _ = r.index(None)", "self.assertEqual(r.value(0), \"Alice\") self.assertEqual(r.value(1), 33) self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6),", "6) with self.assertRaises(TypeError): _ = r.value(None) def test_record_contains(self): r =", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"),", "size\"), None) self.assertEqual(r.value(\"shoe size\", 6), 6) self.assertEqual(r.value(0), \"Alice\") self.assertEqual(r.value(1), 33)", "dict(a_record) == {\"name\": \"Nigel\", \"empire\": \"The British Empire\"} def test_record_as_list(self):", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record) ==", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record2 = Record([\"name\",", "\"empire\"] def test_record_len(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "[\"Nigel\", \"The British Empire\"]) assert repr(a_record) == \"<Record name='Nigel' empire='The", "[\"Alice\", 33, True]) self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\": 33, \"married\": True})", "language governing permissions and # limitations under the License. from", "self.assertEqual(r.keys(), (\"name\", \"age\", \"married\")) def test_record_values(self): r = Record([\"name\", \"age\",", "1) self.assertEqual(r.index(\"married\"), 2) with self.assertRaises(KeyError): _ = r.index(\"shoe size\") self.assertEqual(r.index(0),", "part of Neo4j. 
# # Licensed under the Apache License,", "(\"name\", \"Alice\")]) with self.assertRaises(IndexError): _ = r.items(1, 0, 999) def", "0), {\"age\": 33, \"name\": \"Alice\"}) with self.assertRaises(IndexError): _ = r.data(1,", "\"Alice\")) self.assertEqual(r.values(\"age\", \"name\", \"shoe size\"), (33, \"Alice\", None)) self.assertEqual(r.values(0, \"name\"),", "{\"age\": 33, \"name\": \"Alice\", \"shoe size\": None}) self.assertEqual(r.data(0, \"name\"), {\"name\":", "use this file except in compliance with the License. #", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert len(a_record) ==", "Engine for Objects in Lund AB [http://neotechnology.com] # # This", "test_record_equality(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record2", "\"age\", \"married\")) def test_record_values(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "def test_record_index(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True])", "self.assertRaises(TypeError): _ = r.value(None) def test_record_contains(self): r = Record([\"name\", \"age\",", "[(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\", 33), (\"name\", \"Alice\")]) with self.assertRaises(IndexError):", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "_ = r.items(1, 0, 999) def test_record_index(self): r = Record([\"name\",", "\"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0), [(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\", 33),", "License. # You may obtain a copy of the License", "size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _", "{\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\",", "\"Alice\")]) with self.assertRaises(IndexError): _ = r.items(1, 0, 999) def test_record_index(self):", "{\"name\": \"Nigel\", \"empire\": \"The British Empire\"} def test_record_as_list(self): a_record =", "999) def test_record_keys(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "under the License is distributed on an \"AS IS\" BASIS,", "33), (\"name\", \"Alice\")]) with self.assertRaises(IndexError): _ = r.items(1, 0, 999)", "\"name\"), {\"age\": 33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"), {\"age\":", "License for the specific language governing permissions and # limitations", "[\"Nigel\", \"The British Empire\"]) record3 = Record([\"name\", \"empire\"], [\"Stefan\", \"Das", "r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.keys(), (\"name\",", "Neo4j. 
# # Licensed under the Apache License, Version 2.0", "!= record3 def test_record_hashing(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "class RecordTestCase(TestCase): def test_record_equality(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "def test_record_as_dict(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "33), (\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\", \"shoe size\"), [(\"age\", 33), (\"name\",", "\"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\": 33, \"name\": \"Alice\"}) with self.assertRaises(IndexError): _", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"),", "True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\", 33), (\"name\", \"Alice\")])", "\"married\"], [\"Alice\", 33, True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33)", "\"The British Empire\"]) assert list(a_record) == [\"name\", \"empire\"] def test_record_len(self):", "\"Nigel\", \"empire\": \"The British Empire\"} def test_record_as_list(self): a_record = Record([\"name\",", "(c) 2002-2018 \"Neo Technology,\" # Network Engine for Objects in", "== [\"name\", \"empire\"] def test_record_copy(self): original = Record([\"name\", \"empire\"], [\"Nigel\",", "(\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\")) self.assertEqual(r.values(\"age\",", "test_record_as_dict(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert", "self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"), None) self.assertEqual(r.value(\"shoe size\", 6), 6) self.assertEqual(r.value(0),", "in r) self.assertFalse(\"shoe size\" in r) self.assertTrue(0 in r) self.assertTrue(1", "neo4j.v1 import Record class RecordTestCase(TestCase): def test_record_equality(self): record1 = Record([\"name\",", "hash(record2) assert hash(record1) != hash(record3) assert hash(record2) != hash(record3) def", "with self.assertRaises(KeyError): _ = r.index(\"shoe size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1)", "self.assertEqual(r.values(\"age\", \"name\", \"shoe size\"), (33, \"Alice\", None)) self.assertEqual(r.values(0, \"name\"), (\"Alice\",", "= Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert hash(record1) == hash(record2)", "33, True]) self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\": 33, \"married\": True}) self.assertEqual(r.data(\"name\"),", "[\"Stefan\", \"Das Deutschland\"]) assert record1 == record2 assert record1 !=", "33), (\"name\", \"Alice\"), (\"shoe size\", None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"),", "Lund AB [http://neotechnology.com] # # This file is part of", "= r.data(1, 0, 999) def test_record_keys(self): r = Record([\"name\", \"age\",", "1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _ = r.index(3) with self.assertRaises(TypeError):", "\"Alice\"), (\"shoe size\", None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"), (\"name\", \"Alice\")])", "33, True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\", 33), (\"married\", True)]) self.assertEqual(r.items(\"name\"),", "in compliance with the License. 
# You may obtain a", "software # distributed under the License is distributed on an", "self.assertTrue(2 in r) self.assertFalse(3 in r) with self.assertRaises(TypeError): _ =", "assert record2 != record3 def test_record_hashing(self): record1 = Record([\"name\", \"empire\"],", "def test_record_as_list(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "assert hash(record1) != hash(record3) assert hash(record2) != hash(record3) def test_record_iter(self):", "original is not duplicate def test_record_as_dict(self): a_record = Record([\"name\", \"empire\"],", "record3 = Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert hash(record1) ==", "_ = r.index(None) def test_record_value(self): r = Record([\"name\", \"age\", \"married\"],", "None)) self.assertEqual(r.values(0, \"name\"), (\"Alice\", \"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0), (33,", "\"married\"], [\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"), 2)", "original.copy() assert dict(original) == dict(duplicate) assert original.keys() == duplicate.keys() assert", "self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError): _ = r.value(None) def test_record_contains(self):", "self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"),", "Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record.__iter__()) == [\"name\",", "r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.value(), \"Alice\")", "self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\":", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) duplicate = original.copy()", "Empire\"]) assert list(a_record.__iter__()) == [\"name\", \"empire\"] def test_record_copy(self): original =", "\"Alice\", None)) self.assertEqual(r.values(0, \"name\"), (\"Alice\", \"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0),", "== hash(record2) assert hash(record1) != hash(record3) assert hash(record2) != hash(record3)", "r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.values(), (\"Alice\",", "limitations under the License. from unittest import TestCase from neo4j.v1", "[\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"), 2) with", "self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _ = r.index(3) with", "33), (\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\", 33),", "record1 == record2 assert record1 != record3 assert record2 !=", "Technology,\" # Network Engine for Objects in Lund AB [http://neotechnology.com]", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record2 = Record([\"name\", \"empire\"],", "\"shoe size\": None}) self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"})", "r) self.assertFalse(\"shoe size\" in r) self.assertTrue(0 in r) self.assertTrue(1 in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"), 2) with self.assertRaises(KeyError): _ = r.index(\"shoe size\")", "size\"), {\"age\": 33, \"name\": \"Alice\", \"shoe size\": None}) self.assertEqual(r.data(0, \"name\"),", "to in writing, software # distributed under the License is", "\"The British Empire\"]) record2 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "None) self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError): _ = r.value(None) def", "# See the License for the specific language governing permissions", "\"Alice\")) with self.assertRaises(IndexError): _ = r.values(1, 0, 999) def test_record_items(self):", "0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _ = r.index(3)", "\"age\": 33, \"married\": True}) self.assertEqual(r.data(\"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\":", "33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\")) self.assertEqual(r.values(\"age\", \"name\",", "\"Alice\"), (\"age\", 33), (\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"),", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "True]) self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\": 33, \"married\": True}) self.assertEqual(r.data(\"name\"), {\"name\":", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "hash(record3) def test_record_iter(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "British Empire\"]) assert repr(a_record) == \"<Record name='Nigel' empire='The British Empire'>\"", "TestCase from neo4j.v1 import Record class RecordTestCase(TestCase): def test_record_equality(self): record1", "self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"), None) self.assertEqual(r.value(\"shoe size\", 6),", "[\"Stefan\", \"Das Deutschland\"]) assert hash(record1) == hash(record2) assert hash(record1) !=", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record.__iter__()) ==", "record2 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record3 =", "assert len(a_record) == 2 def test_record_repr(self): a_record = Record([\"name\", \"empire\"],", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.values(), (\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"),", "compliance with the License. 
# You may obtain a copy", "!= record3 assert record2 != record3 def test_record_hashing(self): record1 =", "33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"), {\"age\": 33, \"name\":", "agreed to in writing, software # distributed under the License", "self.assertFalse(\"shoe size\" in r) self.assertTrue(0 in r) self.assertTrue(1 in r)", "(\"Alice\", \"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0), (33, \"Alice\")) with self.assertRaises(IndexError):", "unittest import TestCase from neo4j.v1 import Record class RecordTestCase(TestCase): def", "in r) self.assertTrue(2 in r) self.assertFalse(3 in r) with self.assertRaises(TypeError):", "distributed under the License is distributed on an \"AS IS\"", "\"empire\"], [\"Nigel\", \"The British Empire\"]) record3 = Record([\"name\", \"empire\"], [\"Stefan\",", "self.assertTrue(1 in r) self.assertTrue(2 in r) self.assertFalse(3 in r) with", "\"name\"), [(\"age\", 33), (\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\", \"shoe size\"), [(\"age\",", "(\"name\", \"age\", \"married\")) def test_record_values(self): r = Record([\"name\", \"age\", \"married\"],", "\"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\": 33,", "(\"Alice\",)) self.assertEqual(r.values(1, 0), (33, \"Alice\")) with self.assertRaises(IndexError): _ = r.values(1,", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "\"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\", 33), (\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\", \"shoe", "size\", None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0), [(\"name\",", "from neo4j.v1 import Record class RecordTestCase(TestCase): def test_record_equality(self): record1 =", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "[(\"name\", \"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0), [(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\",", "\"Alice\")]) self.assertEqual(r.items(0), [(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\", 33), (\"name\", \"Alice\")])", "r.value(None) def test_record_contains(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "not use this file except in compliance with the License.", "\"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"), None) self.assertEqual(r.value(\"shoe size\",", "writing, software # distributed under the License is distributed on", "[(\"age\", 33), (\"name\", \"Alice\")]) with self.assertRaises(IndexError): _ = r.items(1, 0,", "\"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record) == [\"name\", \"empire\"]", "you may not use this file except in compliance with", "\"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"), None)", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "\"The British Empire\"]) record3 = Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"])", "[\"Alice\", 33, True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\", 33), (\"married\", True)])", "(\"name\", \"Alice\")]) self.assertEqual(r.items(0), 
[(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\", 33), (\"name\",", "governing permissions and # limitations under the License. from unittest", "(33, \"Alice\")) self.assertEqual(r.values(\"age\", \"name\", \"shoe size\"), (33, \"Alice\", None)) self.assertEqual(r.values(0,", "self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\": 33, \"married\": True}) self.assertEqual(r.data(\"name\"), {\"name\": \"Alice\"})", "CONDITIONS OF ANY KIND, either express or implied. # See", "\"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"), {\"age\": 33, \"name\": \"Alice\", \"shoe", "British Empire\"]) duplicate = original.copy() assert dict(original) == dict(duplicate) assert", "0, 999) def test_record_items(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"Alice\"}) with self.assertRaises(IndexError): _ = r.data(1, 0, 999) def test_record_keys(self):", "\"married\": True}) self.assertEqual(r.data(\"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33, \"name\":", "British Empire\"} def test_record_as_list(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record3 = Record([\"name\",", "def test_record_keys(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True])", "record3 = Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert record1 ==", "def test_record_hashing(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "\"empire\"], [\"Nigel\", \"The British Empire\"]) assert len(a_record) == 2 def", "License. from unittest import TestCase from neo4j.v1 import Record class", "def test_record_repr(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "self.assertTrue(\"married\" in r) self.assertFalse(\"shoe size\" in r) self.assertTrue(0 in r)", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertTrue(\"name\" in r)", "def test_record_contains(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True])", "self.assertRaises(IndexError): _ = r.values(1, 0, 999) def test_record_items(self): r =", "\"name\"), [(\"name\", \"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0), [(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0),", "self.assertRaises(TypeError): _ = r.index(None) def test_record_value(self): r = Record([\"name\", \"age\",", "33, True]) self.assertTrue(\"name\" in r) self.assertTrue(\"age\" in r) self.assertTrue(\"married\" in", "[(\"name\", \"Alice\"), (\"age\", 33), (\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\",", "in r) self.assertTrue(1 in r) self.assertTrue(2 in r) self.assertFalse(3 in", "!= hash(record3) assert hash(record2) != hash(record3) def test_record_iter(self): a_record =", "self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"),", "Deutschland\"]) assert hash(record1) == hash(record2) assert hash(record1) != hash(record3) assert", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"),", "\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"), {\"age\": 33, \"name\": 
\"Alice\",", "[(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\", 33), (\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\",", "record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record2 =", "duplicate def test_record_as_dict(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "6) self.assertEqual(r.value(0), \"Alice\") self.assertEqual(r.value(1), 33) self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3,", "== 2 def test_record_repr(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "r) self.assertTrue(2 in r) self.assertFalse(3 in r) with self.assertRaises(TypeError): _", "r.index(None) def test_record_value(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "OR CONDITIONS OF ANY KIND, either express or implied. #", "_ = r.index(3) with self.assertRaises(TypeError): _ = r.index(None) def test_record_value(self):", "r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertTrue(\"name\" in", "hash(record1) != hash(record3) assert hash(record2) != hash(record3) def test_record_iter(self): a_record", "[\"Alice\", 33, True]) self.assertTrue(\"name\" in r) self.assertTrue(\"age\" in r) self.assertTrue(\"married\"", "self.assertRaises(KeyError): _ = r.index(\"shoe size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2),", "the License is distributed on an \"AS IS\" BASIS, #", "self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0), (33, \"Alice\")) with self.assertRaises(IndexError): _ =", "self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0), [(\"name\", \"Alice\")]) self.assertEqual(r.items(1,", "a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert dict(a_record)", "self.assertEqual(r.values(), (\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\"))", "with self.assertRaises(IndexError): _ = r.data(1, 0, 999) def test_record_keys(self): r", "Empire\"} def test_record_as_list(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "assert list(a_record) == [\"name\", \"empire\"] def test_record_len(self): a_record = Record([\"name\",", "self.assertEqual(r.data(1, 0), {\"age\": 33, \"name\": \"Alice\"}) with self.assertRaises(IndexError): _ =", "test_record_values(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.values(),", "duplicate.keys() assert original is not duplicate def test_record_as_dict(self): a_record =", "\"Alice\", \"shoe size\": None}) self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\":", "= Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert record1 == record2", "Empire\"]) assert len(a_record) == 2 def test_record_repr(self): a_record = Record([\"name\",", "= r.value(None) def test_record_contains(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert hash(record1) == hash(record2) assert", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"),", "law or agreed to in writing, software # distributed under", "True]) self.assertEqual(r.keys(), 
(\"name\", \"age\", \"married\")) def test_record_values(self): r = Record([\"name\",", "(33, \"Alice\")) with self.assertRaises(IndexError): _ = r.values(1, 0, 999) def", "[\"Nigel\", \"The British Empire\"]) assert dict(a_record) == {\"name\": \"Nigel\", \"empire\":", "test_record_keys(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.keys(),", "(\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\")) self.assertEqual(r.values(\"age\", \"name\", \"shoe size\"), (33,", "British Empire\"]) record2 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "Empire\"]) assert list(a_record) == [\"name\", \"empire\"] def test_record_len(self): a_record =", "record3 def test_record_hashing(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "in r) self.assertTrue(\"married\" in r) self.assertFalse(\"shoe size\" in r) self.assertTrue(0", "\"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert record1 == record2 assert record1", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.keys(), (\"name\", \"age\", \"married\")) def", "duplicate = original.copy() assert dict(original) == dict(duplicate) assert original.keys() ==", "assert dict(original) == dict(duplicate) assert original.keys() == duplicate.keys() assert original", "record2 assert record1 != record3 assert record2 != record3 def", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\", 33),", "assert original is not duplicate def test_record_as_dict(self): a_record = Record([\"name\",", "= r.index(None) def test_record_value(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "test_record_copy(self): original = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) duplicate", "#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c)", "Network Engine for Objects in Lund AB [http://neotechnology.com] # #", "def test_record_iter(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "may obtain a copy of the License at # #", "Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record) == [\"name\",", "\"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe", "def test_record_items(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True])", "for Objects in Lund AB [http://neotechnology.com] # # This file", "\"married\"], [\"Alice\", 33, True]) self.assertEqual(r.keys(), (\"name\", \"age\", \"married\")) def test_record_values(self):", "self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _ = r.index(3) with self.assertRaises(TypeError): _", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Empire\"]) record3 = Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert record1", "assert list(a_record.__iter__()) == [\"name\", \"empire\"] def test_record_copy(self): original = Record([\"name\",", "from unittest import TestCase from neo4j.v1 import Record class RecordTestCase(TestCase):", "self.assertTrue(0 in r) self.assertTrue(1 in r) self.assertTrue(2 in r) self.assertFalse(3", "r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"), 0)", "\"shoe size\"), [(\"age\", 33), (\"name\", \"Alice\"), (\"shoe size\", None)]) self.assertEqual(r.items(0,", "British Empire'>\" def 
test_record_data(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "True) self.assertEqual(r.value(\"shoe size\"), None) self.assertEqual(r.value(\"shoe size\", 6), 6) self.assertEqual(r.value(0), \"Alice\")", "[(\"age\", 33), (\"name\", \"Alice\"), (\"shoe size\", None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\",", "may not use this file except in compliance with the", "RecordTestCase(TestCase): def test_record_equality(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"name\": \"Alice\"}) with self.assertRaises(IndexError): _ = r.data(1, 0, 999) def", "(\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\", 33), (\"name\",", "this file except in compliance with the License. # You", "assert hash(record1) == hash(record2) assert hash(record1) != hash(record3) assert hash(record2)", "\"married\"], [\"Alice\", 33, True]) self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\": 33, \"married\":", "# limitations under the License. from unittest import TestCase from", "\"The British Empire\"]) assert dict(a_record) == {\"name\": \"Nigel\", \"empire\": \"The", "(33, \"Alice\", None)) self.assertEqual(r.values(0, \"name\"), (\"Alice\", \"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1,", "33, \"married\": True}) self.assertEqual(r.data(\"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33,", "r) self.assertTrue(0 in r) self.assertTrue(1 in r) self.assertTrue(2 in r)", "6), 6) with self.assertRaises(TypeError): _ = r.value(None) def test_record_contains(self): r", "(\"age\", 33), (\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\",", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "empire='The British Empire'>\" def test_record_data(self): r = Record([\"name\", \"age\", \"married\"],", "\"name\", \"shoe size\"), (33, \"Alice\", None)) self.assertEqual(r.values(0, \"name\"), (\"Alice\", \"Alice\"))", "# # Licensed under the Apache License, Version 2.0 (the", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertTrue(\"name\" in r) self.assertTrue(\"age\" in", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Empire\"]) assert repr(a_record) == \"<Record name='Nigel' empire='The British Empire'>\" def", "self.assertRaises(IndexError): _ = r.data(1, 0, 999) def test_record_keys(self): r =", "# Copyright (c) 2002-2018 \"Neo Technology,\" # Network Engine for", "r.values(1, 0, 999) def test_record_items(self): r = Record([\"name\", \"age\", \"married\"],", "def test_record_copy(self): original = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.values(), (\"Alice\", 33, True))", "2) with self.assertRaises(KeyError): _ = r.index(\"shoe size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1),", "[http://neotechnology.com] # # This file is part of Neo4j. 
#", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[\"Nigel\", \"The British Empire\"]) assert list(a_record.__iter__()) == [\"name\", \"empire\"] def", "test_record_hashing(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record2", "33, True]) self.assertEqual(r.values(), (\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"),", "True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe", "British Empire\"]) assert dict(a_record) == {\"name\": \"Nigel\", \"empire\": \"The British", "Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert len(a_record) == 2", "True]) self.assertEqual(r.values(), (\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33,", "\"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\", 33), (\"name\", \"Alice\")]) with self.assertRaises(IndexError): _", "\"empire\"] def test_record_copy(self): original = Record([\"name\", \"empire\"], [\"Nigel\", \"The British", "Objects in Lund AB [http://neotechnology.com] # # This file is", "== {\"name\": \"Nigel\", \"empire\": \"The British Empire\"} def test_record_as_list(self): a_record", "of Neo4j. # # Licensed under the Apache License, Version", "= r.items(1, 0, 999) def test_record_index(self): r = Record([\"name\", \"age\",", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertTrue(\"name\" in r) self.assertTrue(\"age\"", "\"Das Deutschland\"]) assert record1 == record2 assert record1 != record3", "\"empire\"], [\"Nigel\", \"The British Empire\"]) assert repr(a_record) == \"<Record name='Nigel'", "size\" in r) self.assertTrue(0 in r) self.assertTrue(1 in r) self.assertTrue(2", "Empire\"]) record2 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) record3", "\"married\"], [\"Alice\", 33, True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\", 33), (\"married\",", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "== [\"name\", \"empire\"] def test_record_len(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\",", "\"name\", \"shoe size\"), [(\"age\", 33), (\"name\", \"Alice\"), (\"shoe size\", None)])", "-*- # Copyright (c) 2002-2018 \"Neo Technology,\" # Network Engine", "33, \"name\": \"Alice\"}) with self.assertRaises(IndexError): _ = r.data(1, 0, 999)", "a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert repr(a_record)", "(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\", \"shoe size\"), [(\"age\", 33), (\"name\", \"Alice\"),", "33) self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError):", "[\"Nigel\", \"The British Empire\"]) assert list(a_record) == [\"name\", \"empire\"] def", "original.keys() == duplicate.keys() assert original is not duplicate def test_record_as_dict(self):", "== record2 assert record1 != record3 assert record2 != record3", "or implied. 
# See the License for the specific language", "in r) self.assertFalse(3 in r) with self.assertRaises(TypeError): _ = r.index(None)", "[\"Alice\", 33, True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"),", "{\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\": 33, \"name\": \"Alice\"}) with self.assertRaises(IndexError):", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "AB [http://neotechnology.com] # # This file is part of Neo4j.", "This file is part of Neo4j. # # Licensed under", "== dict(duplicate) assert original.keys() == duplicate.keys() assert original is not", "0), (33, \"Alice\")) with self.assertRaises(IndexError): _ = r.values(1, 0, 999)", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\": 33,", "test_record_index(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.index(\"name\"),", "== \"<Record name='Nigel' empire='The British Empire'>\" def test_record_data(self): r =", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "hash(record3) assert hash(record2) != hash(record3) def test_record_iter(self): a_record = Record([\"name\",", "test_record_as_list(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.values(), (\"Alice\", 33,", "assert dict(a_record) == {\"name\": \"Nigel\", \"empire\": \"The British Empire\"} def", "== duplicate.keys() assert original is not duplicate def test_record_as_dict(self): a_record", "0) self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"), 2) with self.assertRaises(KeyError): _ = r.index(\"shoe", "test_record_data(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.data(),", "assert record1 != record3 assert record2 != record3 def test_record_hashing(self):", "and # limitations under the License. from unittest import TestCase", "[(\"age\", 33), (\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\", \"shoe size\"), [(\"age\", 33),", "(the \"License\"); # you may not use this file except", "test_record_value(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.value(),", "999) def test_record_items(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "# you may not use this file except in compliance", "def test_record_value(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True])", "self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"), None) self.assertEqual(r.value(\"shoe", "2) with self.assertRaises(IndexError): _ = r.index(3) with self.assertRaises(TypeError): _ =", "0, 999) def test_record_keys(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "dict(original) == dict(duplicate) assert original.keys() == duplicate.keys() assert original is", "permissions and # limitations under the License. 
from unittest import", "True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\")) self.assertEqual(r.values(\"age\", \"name\", \"shoe", "not duplicate def test_record_as_dict(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "British Empire\"]) assert list(a_record.__iter__()) == [\"name\", \"empire\"] def test_record_copy(self): original", "list(a_record.__iter__()) == [\"name\", \"empire\"] def test_record_copy(self): original = Record([\"name\", \"empire\"],", "\"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record.__iter__()) == [\"name\", \"empire\"]", "_ = r.data(1, 0, 999) def test_record_keys(self): r = Record([\"name\",", "[\"Alice\", 33, True]) self.assertEqual(r.values(), (\"Alice\", 33, True)) self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\",", "# # This file is part of Neo4j. # #", "# # Unless required by applicable law or agreed to", "size\"), [(\"age\", 33), (\"name\", \"Alice\"), (\"shoe size\", None)]) self.assertEqual(r.items(0, \"name\"),", "self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\": 33, \"name\": \"Alice\"}) with", "\"<Record name='Nigel' empire='The British Empire'>\" def test_record_data(self): r = Record([\"name\",", "Empire\"]) record3 = Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert hash(record1)", "[\"name\", \"empire\"] def test_record_len(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "list(a_record) == [\"name\", \"empire\"] def test_record_len(self): a_record = Record([\"name\", \"empire\"],", "name='Nigel' empire='The British Empire'>\" def test_record_data(self): r = Record([\"name\", \"age\",", "Record class RecordTestCase(TestCase): def test_record_equality(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\",", "Version 2.0 (the \"License\"); # you may not use this", "= Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert repr(a_record) ==", "\"shoe size\"), {\"age\": 33, \"name\": \"Alice\", \"shoe size\": None}) self.assertEqual(r.data(0,", "True}) self.assertEqual(r.data(\"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33, \"name\": \"Alice\"})", "original = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) duplicate =", "# -*- encoding: utf-8 -*- # Copyright (c) 2002-2018 \"Neo", "self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError): _ =", "import Record class RecordTestCase(TestCase): def test_record_equality(self): record1 = Record([\"name\", \"empire\"],", "_ = r.index(\"shoe size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2)", "0, 999) def test_record_index(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\",", "_ = r.values(1, 0, 999) def test_record_items(self): r = Record([\"name\",", "implied. 
# See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"empire\"], [\"Nigel\", \"The British Empire\"]) assert dict(a_record) == {\"name\": \"Nigel\",", "self.assertRaises(IndexError): _ = r.items(1, 0, 999) def test_record_index(self): r =", "r) self.assertTrue(\"age\" in r) self.assertTrue(\"married\" in r) self.assertFalse(\"shoe size\" in", "self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"), {\"age\": 33, \"name\": \"Alice\", \"shoe size\":", "\"The British Empire\"]) assert list(a_record.__iter__()) == [\"name\", \"empire\"] def test_record_copy(self):", "len(a_record) == 2 def test_record_repr(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\",", "def test_record_data(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True])", "by applicable law or agreed to in writing, software #", "a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert len(a_record)", "record3 assert record2 != record3 def test_record_hashing(self): record1 = Record([\"name\",", "None) self.assertEqual(r.value(\"shoe size\", 6), 6) self.assertEqual(r.value(0), \"Alice\") self.assertEqual(r.value(1), 33) self.assertEqual(r.value(2),", "with self.assertRaises(TypeError): _ = r.value(None) def test_record_contains(self): r = Record([\"name\",", "self.assertEqual(r.data(\"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\"), {\"age\": 33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\",", "r.index(3) with self.assertRaises(TypeError): _ = r.index(None) def test_record_value(self): r =", "\"The British Empire\"} def test_record_as_list(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\",", "Record([\"name\", \"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert record1 == record2 assert", "a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert list(a_record)", "\"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\": 33, \"name\": \"Alice\"})", "(\"name\", \"Alice\"), (\"shoe size\", None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"), (\"name\",", "True]) self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\", 33), (\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\",", "file is part of Neo4j. 
# # Licensed under the", "\"shoe size\"), (33, \"Alice\", None)) self.assertEqual(r.values(0, \"name\"), (\"Alice\", \"Alice\")) self.assertEqual(r.values(0),", "British Empire\"]) assert list(a_record) == [\"name\", \"empire\"] def test_record_len(self): a_record", "r.items(1, 0, 999) def test_record_index(self): r = Record([\"name\", \"age\", \"married\"],", "33, True]) self.assertEqual(r.keys(), (\"name\", \"age\", \"married\")) def test_record_values(self): r =", "{\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0), {\"age\": 33, \"name\":", "is not duplicate def test_record_as_dict(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\",", "self.assertEqual(r.value(1), 33) self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6) with", "\"married\"], [\"Alice\", 33, True]) self.assertTrue(\"name\" in r) self.assertTrue(\"age\" in r)", "self.assertEqual(r.values(\"name\"), (\"Alice\",)) self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\")) self.assertEqual(r.values(\"age\", \"name\", \"shoe size\"),", "0), [(\"age\", 33), (\"name\", \"Alice\")]) with self.assertRaises(IndexError): _ = r.items(1,", "[\"Nigel\", \"The British Empire\"]) record2 = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError): _ =", "is part of Neo4j. # # Licensed under the Apache", "r.data(1, 0, 999) def test_record_keys(self): r = Record([\"name\", \"age\", \"married\"],", "self.assertEqual(r.items(\"age\", \"name\", \"shoe size\"), [(\"age\", 33), (\"name\", \"Alice\"), (\"shoe size\",", "Empire\"]) assert dict(a_record) == {\"name\": \"Nigel\", \"empire\": \"The British Empire\"}", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "{\"age\": 33, \"name\": \"Alice\"}) self.assertEqual(r.data(\"age\", \"name\", \"shoe size\"), {\"age\": 33,", "[\"Nigel\", \"The British Empire\"]) duplicate = original.copy() assert dict(original) ==", "\"empire\"], [\"Nigel\", \"The British Empire\"]) duplicate = original.copy() assert dict(original)", "Unless required by applicable law or agreed to in writing,", "self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError): _", "True]) self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"), 2) with self.assertRaises(KeyError): _", "\"name\", \"shoe size\"), {\"age\": 33, \"name\": \"Alice\", \"shoe size\": None})", "33, True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"), 33) self.assertEqual(r.value(\"married\"), True)", "the specific language governing permissions and # limitations under the", "Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert dict(a_record) == {\"name\":", "33, \"name\": \"Alice\", \"shoe size\": None}) self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"})", "r.index(\"shoe size\") self.assertEqual(r.index(0), 0) self.assertEqual(r.index(1), 1) self.assertEqual(r.index(2), 2) with self.assertRaises(IndexError):", "in r) self.assertTrue(0 in r) self.assertTrue(1 in r) self.assertTrue(2 in", "repr(a_record) == \"<Record name='Nigel' empire='The British Empire'>\" def test_record_data(self): r", "applicable law or agreed to in writing, software # distributed", "\"name\": \"Alice\", \"shoe 
size\": None}) self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0),", "2002-2018 \"Neo Technology,\" # Network Engine for Objects in Lund", "Copyright (c) 2002-2018 \"Neo Technology,\" # Network Engine for Objects", "in writing, software # distributed under the License is distributed", "\"Alice\")]) self.assertEqual(r.items(\"age\", \"name\", \"shoe size\"), [(\"age\", 33), (\"name\", \"Alice\"), (\"shoe", "self.assertEqual(r.values(0, \"name\"), (\"Alice\", \"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0), (33, \"Alice\"))", "Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert repr(a_record) == \"<Record", "assert hash(record2) != hash(record3) def test_record_iter(self): a_record = Record([\"name\", \"empire\"],", "self.assertEqual(r.index(\"name\"), 0) self.assertEqual(r.index(\"age\"), 1) self.assertEqual(r.index(\"married\"), 2) with self.assertRaises(KeyError): _ =", "= Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.data(), {\"name\": \"Alice\",", "!= hash(record3) def test_record_iter(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The", "\"married\")) def test_record_values(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "utf-8 -*- # Copyright (c) 2002-2018 \"Neo Technology,\" # Network", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.keys(), (\"name\", \"age\", \"married\"))", "test_record_len(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"]) assert", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.data(), {\"name\": \"Alice\", \"age\":", "33) self.assertEqual(r.value(\"married\"), True) self.assertEqual(r.value(\"shoe size\"), None) self.assertEqual(r.value(\"shoe size\", 6), 6)", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "self.assertEqual(r.items(), [(\"name\", \"Alice\"), (\"age\", 33), (\"married\", True)]) self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")])", "with self.assertRaises(IndexError): _ = r.items(1, 0, 999) def test_record_index(self): r", "License, Version 2.0 (the \"License\"); # you may not use", "assert repr(a_record) == \"<Record name='Nigel' empire='The British Empire'>\" def test_record_data(self):", "# You may obtain a copy of the License at", "self.assertEqual(r.items(0), [(\"name\", \"Alice\")]) self.assertEqual(r.items(1, 0), [(\"age\", 33), (\"name\", \"Alice\")]) with", "size\": None}) self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1,", "= r.index(3) with self.assertRaises(TypeError): _ = r.index(None) def test_record_value(self): r", "\"Alice\") self.assertEqual(r.value(1), 33) self.assertEqual(r.value(2), True) self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6)", "None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0), [(\"name\", \"Alice\")])", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "def test_record_equality(self): record1 = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "(\"shoe size\", None)]) self.assertEqual(r.items(0, \"name\"), [(\"name\", \"Alice\"), (\"name\", \"Alice\")]) self.assertEqual(r.items(0),", "Empire\"]) duplicate = original.copy() assert dict(original) == dict(duplicate) assert original.keys()", 
"\"empire\"], [\"Stefan\", \"Das Deutschland\"]) assert hash(record1) == hash(record2) assert hash(record1)", "self.assertEqual(r.items(\"name\"), [(\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\", \"name\"), [(\"age\", 33), (\"name\", \"Alice\")]) self.assertEqual(r.items(\"age\",", "self.assertEqual(r.index(\"married\"), 2) with self.assertRaises(KeyError): _ = r.index(\"shoe size\") self.assertEqual(r.index(0), 0)", "\"name\"), (\"Alice\", \"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0), (33, \"Alice\")) with", "Empire'>\" def test_record_data(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.items(), [(\"name\",", "test_record_contains(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertTrue(\"name\"", "True]) self.assertTrue(\"name\" in r) self.assertTrue(\"age\" in r) self.assertTrue(\"married\" in r)", "the License for the specific language governing permissions and #", "with self.assertRaises(TypeError): _ = r.index(None) def test_record_value(self): r = Record([\"name\",", "Apache License, Version 2.0 (the \"License\"); # you may not", "999) def test_record_index(self): r = Record([\"name\", \"age\", \"married\"], [\"Alice\", 33,", "in r) self.assertTrue(\"age\" in r) self.assertTrue(\"married\" in r) self.assertFalse(\"shoe size\"", "either express or implied. # See the License for the", "self.assertTrue(\"name\" in r) self.assertTrue(\"age\" in r) self.assertTrue(\"married\" in r) self.assertFalse(\"shoe", "\"The British Empire\"]) assert len(a_record) == 2 def test_record_repr(self): a_record", "self.assertEqual(r.values(\"age\", \"name\"), (33, \"Alice\")) self.assertEqual(r.values(\"age\", \"name\", \"shoe size\"), (33, \"Alice\",", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "def test_record_len(self): a_record = Record([\"name\", \"empire\"], [\"Nigel\", \"The British Empire\"])", "self.assertEqual(r.value(3), None) self.assertEqual(r.value(3, 6), 6) with self.assertRaises(TypeError): _ = r.value(None)", "= original.copy() assert dict(original) == dict(duplicate) assert original.keys() == duplicate.keys()", "\"The British Empire\"]) assert repr(a_record) == \"<Record name='Nigel' empire='The British", "\"empire\": \"The British Empire\"} def test_record_as_list(self): a_record = Record([\"name\", \"empire\"],", "None}) self.assertEqual(r.data(0, \"name\"), {\"name\": \"Alice\"}) self.assertEqual(r.data(0), {\"name\": \"Alice\"}) self.assertEqual(r.data(1, 0),", "encoding: utf-8 -*- # Copyright (c) 2002-2018 \"Neo Technology,\" #", "\"Alice\")) self.assertEqual(r.values(0), (\"Alice\",)) self.assertEqual(r.values(1, 0), (33, \"Alice\")) with self.assertRaises(IndexError): _", "assert record1 == record2 assert record1 != record3 assert record2", "_ = r.value(None) def test_record_contains(self): r = Record([\"name\", \"age\", \"married\"],", "Record([\"name\", \"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\")", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "\"age\", \"married\"], [\"Alice\", 33, True]) self.assertEqual(r.value(), \"Alice\") self.assertEqual(r.value(\"name\"), \"Alice\") self.assertEqual(r.value(\"age\"),", "-*- encoding: utf-8 -*- # Copyright (c) 2002-2018 \"Neo Technology,\"", "= Record([\"name\", \"empire\"], 
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

from unittest import TestCase

from neo4j.v1 import Record


class RecordTestCase(TestCase):

    def test_record_equality(self):
        record1 = Record(["name", "empire"], ["Nigel", "The British Empire"])
        record2 = Record(["name", "empire"], ["Nigel", "The British Empire"])
        record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"])
        assert record1 == record2
        assert record1 != record3
        assert record2 != record3

    def test_record_hashing(self):
        record1 = Record(["name", "empire"], ["Nigel", "The British Empire"])
        record2 = Record(["name", "empire"], ["Nigel", "The British Empire"])
        record3 = Record(["name", "empire"], ["Stefan", "Das Deutschland"])
        assert hash(record1) == hash(record2)
        assert hash(record1) != hash(record3)
        assert hash(record2) != hash(record3)

    def test_record_iter(self):
        a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
        assert list(a_record.__iter__()) == ["Nigel", "The British Empire"]

    def test_record_copy(self):
        original = Record(["name", "empire"], ["Nigel", "The British Empire"])
        duplicate = original.copy()
        assert dict(original) == dict(duplicate)
        assert original.keys() == duplicate.keys()
        assert original is not duplicate

    def test_record_as_dict(self):
        a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
        assert dict(a_record) == {"name": "Nigel", "empire": "The British Empire"}

    def test_record_as_list(self):
        a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
        assert list(a_record) == ["Nigel", "The British Empire"]

    def test_record_len(self):
        a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
        assert len(a_record) == 2

    def test_record_repr(self):
        a_record = Record(["name", "empire"], ["Nigel", "The British Empire"])
        assert repr(a_record) == "<Record name='Nigel' empire='The British Empire'>"

    def test_record_data(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertEqual(r.data(), {"name": "Alice", "age": 33, "married": True})
        self.assertEqual(r.data("name"), {"name": "Alice"})
        self.assertEqual(r.data("age", "name"), {"age": 33, "name": "Alice"})
        self.assertEqual(r.data("age", "name", "shoe size"),
                         {"age": 33, "name": "Alice", "shoe size": None})
        self.assertEqual(r.data(0, "name"), {"name": "Alice"})
        self.assertEqual(r.data(0), {"name": "Alice"})
        self.assertEqual(r.data(1, 0), {"age": 33, "name": "Alice"})
        with self.assertRaises(IndexError):
            _ = r.data(1, 0, 999)

    def test_record_keys(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertEqual(r.keys(), ("name", "age", "married"))

    def test_record_values(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertEqual(r.values(), ("Alice", 33, True))
        self.assertEqual(r.values("age", "name"), (33, "Alice"))
        self.assertEqual(r.values("age", "name", "shoe size"), (33, "Alice", None))
        self.assertEqual(r.values(0, "name"), ("Alice", "Alice"))
        self.assertEqual(r.values(0), ("Alice",))
        self.assertEqual(r.values(1, 0), (33, "Alice"))
        with self.assertRaises(IndexError):
            _ = r.values(1, 0, 999)

    def test_record_items(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertEqual(r.items(), [("name", "Alice"), ("age", 33), ("married", True)])
        self.assertEqual(r.items("name"), [("name", "Alice")])
        self.assertEqual(r.items("age", "name"), [("age", 33), ("name", "Alice")])
        self.assertEqual(r.items("age", "name", "shoe size"),
                         [("age", 33), ("name", "Alice"), ("shoe size", None)])

    def test_record_index(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertEqual(r.index("name"), 0)
        self.assertEqual(r.index("age"), 1)
        self.assertEqual(r.index("married"), 2)
        with self.assertRaises(KeyError):
            _ = r.index("shoe size")
        self.assertEqual(r.index(0), 0)
        with self.assertRaises(IndexError):
            _ = r.index(3)
        with self.assertRaises(TypeError):
            _ = r.index(None)

    def test_record_value(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertEqual(r.value(), "Alice")
        self.assertEqual(r.value("name"), "Alice")
        self.assertEqual(r.value("age"), 33)
        self.assertEqual(r.value("shoe size"), None)
        self.assertEqual(r.value("shoe size", 6), 6)
        self.assertEqual(r.value(0), "Alice")
        self.assertEqual(r.value(1), 33)
        self.assertEqual(r.value(2), True)
        self.assertEqual(r.value(3), None)
        self.assertEqual(r.value(3, 6), 6)
        with self.assertRaises(TypeError):
            _ = r.value(None)

    def test_record_contains(self):
        r = Record(["name", "age", "married"], ["Alice", 33, True])
        self.assertTrue("name" in r)
        self.assertTrue("age" in r)
        self.assertTrue("married" in r)
        self.assertFalse("shoe size" in r)
        self.assertTrue(0 in r)
        self.assertTrue(1 in r)
        self.assertTrue(2 in r)
        self.assertFalse(3 in r)
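The assertions above pin down the small, mapping-style interface that Record exposes (keys, values, items, data, value, index, and membership by key or position). The short stand-alone sketch below replays that interface on hypothetical data; the field names and values here are illustrative only and are not taken from the test suite.

from neo4j.v1 import Record

# A Record behaves like an ordered, read-only mapping from keys to values.
r = Record(["city", "population"], ["Lund", 91000])   # hypothetical example data

print(r.keys())                    # ('city', 'population')
print(r.values())                  # ('Lund', 91000)
print(r.data("city"))              # {'city': 'Lund'}
print(r.value("missing", "n/a"))   # 'n/a': unknown keys fall back to the supplied default
print("city" in r, 0 in r)         # True True: membership works by key or by index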
import dash

from catboost import CatBoostClassifier, CatBoostRegressor
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor

from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer
from explainerdashboard.datasets import titanic_survive, titanic_fare, titanic_embarked, titanic_names
from explainerdashboard.dashboards import ExplainerDashboard


def get_classification_explainer(xgboost=False, include_y=True):
    X_train, y_train, X_test, y_test = titanic_survive()
    if xgboost:
        model = XGBClassifier().fit(X_train, y_train)
    else:
        model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train)
    if include_y:
        explainer = ClassifierExplainer(
            model, X_test, y_test,
            cats=['Sex', 'Deck', 'Embarked'],
            labels=['Not survived', 'Survived'])
    else:
        explainer = ClassifierExplainer(
            model, X_test,
            cats=['Sex', 'Deck', 'Embarked'],
            labels=['Not survived', 'Survived'])
    explainer.calculate_properties()
    return explainer


def get_regression_explainer(xgboost=False, include_y=True):
    X_train, y_train, X_test, y_test = titanic_fare()
    train_names, test_names = titanic_names()
    if xgboost:
        model = XGBRegressor().fit(X_train, y_train)
    else:
        model = RandomForestRegressor(n_estimators=50, max_depth=10).fit(X_train, y_train)
    if include_y:
        reg_explainer = RegressionExplainer(model, X_test, y_test,
                                            cats=['Sex', 'Deck', 'Embarked'],
                                            idxs=test_names, units="$")
    else:
        reg_explainer = RegressionExplainer(model, X_test,
                                            cats=['Sex', 'Deck', 'Embarked'],
                                            idxs=test_names, units="$")
    reg_explainer.calculate_properties()
    return reg_explainer


def get_multiclass_explainer(xgboost=False, include_y=True):
    X_train, y_train, X_test, y_test = titanic_embarked()
    train_names, test_names = titanic_names()
    if xgboost:
        model = XGBClassifier().fit(X_train, y_train)
    else:
        model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train)
    if include_y:
        if xgboost:
            multi_explainer = ClassifierExplainer(model, X_test, y_test,
                                                  model_output='logodds',
                                                  cats=['Sex', 'Deck'],
                                                  labels=['Queenstown', 'Southampton', 'Cherbourg'])
        else:
            multi_explainer = ClassifierExplainer(model, X_test, y_test,
                                                  cats=['Sex', 'Deck'],
                                                  labels=['Queenstown', 'Southampton', 'Cherbourg'])
    else:
        if xgboost:
            multi_explainer = ClassifierExplainer(model, X_test,
                                                  model_output='logodds',
                                                  cats=['Sex', 'Deck'],
                                                  labels=['Queenstown', 'Southampton', 'Cherbourg'])
        else:
            multi_explainer = ClassifierExplainer(model, X_test,
                                                  cats=['Sex', 'Deck'],
                                                  labels=['Queenstown', 'Southampton', 'Cherbourg'])
    multi_explainer.calculate_properties()
    return multi_explainer


def get_catboost_classifier():
    X_train, y_train, X_test, y_test = titanic_survive()
    train_names, test_names = titanic_names()
    model = CatBoostClassifier(iterations=100, verbose=0).fit(X_train, y_train)
    explainer = ClassifierExplainer(
        model, X_test, y_test,
        cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']}, 'Deck', 'Embarked'],
        labels=['Not survived', 'Survived'],
        idxs=test_names)
    X_cats, y_cats = explainer.X_merged, explainer.y.astype("int")
    model = CatBoostClassifier(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7])
    explainer = ClassifierExplainer(model, X_cats, y_cats, idxs=X_test.index)
    explainer.calculate_properties(include_interactions=False)
    return explainer


def get_catboost_regressor():
    X_train, y_train, X_test, y_test = titanic_fare()
    model = CatBoostRegressor(iterations=5, verbose=0).fit(X_train, y_train)
    explainer = RegressionExplainer(model, X_test, y_test,
                                    cats=["Sex", 'Deck', 'Embarked'])
    X_cats, y_cats = explainer.X_merged, explainer.y
    model = CatBoostRegressor(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7])
    explainer = RegressionExplainer(model, X_cats, y_cats, idxs=X_test.index)
    explainer.calculate_properties(include_interactions=False)
    return explainer


def test_classification_dashboard(dash_duo):
    explainer = get_classification_explainer()
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_simple_classification_dashboard(dash_duo):
    explainer = get_classification_explainer()
    db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("#simple-classifier-composite-title", "testing", timeout=20)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_simple_regression_dashboard(dash_duo):
    explainer = get_regression_explainer()
    db = ExplainerDashboard(explainer, title="testing", responsive=False, simple=True)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("#simple-regression-composite-title", "testing", timeout=20)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_classification_dashboard_no_y(dash_duo):
    explainer = get_classification_explainer(include_y=False)
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_xgboost_classification_dashboard(dash_duo):
    explainer = get_classification_explainer(xgboost=True)
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_regression_dashboard(dash_duo):
    explainer = get_regression_explainer()
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=20)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_regression_dashboard_no_y(dash_duo):
    explainer = get_regression_explainer(include_y=False)
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_xgboost_regression_dashboard(dash_duo):
    explainer = get_regression_explainer(xgboost=True)
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_multiclass_dashboard(dash_duo):
    explainer = get_multiclass_explainer()
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_multiclass_dashboard_no_y(dash_duo):
    explainer = get_multiclass_explainer(include_y=False)
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_xgboost_multiclass_dashboard(dash_duo):
    explainer = get_multiclass_explainer(xgboost=True)
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_catboost_classification_dashboard(dash_duo):
    explainer = get_catboost_classifier()
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"


def test_cat_boost_regression_dashboard(dash_duo):
    explainer = get_catboost_regressor()
    db = ExplainerDashboard(explainer, title="testing", responsive=False)
    dash_duo.start_server(db.app)
    dash_duo.wait_for_text_to_equal("h1", "testing", timeout=30)
    assert dash_duo.get_logs() == [], "browser console should contain no error"
'Southampton', 'Cherbourg']) else: if xgboost: multi_explainer =", "train_names, test_names = titanic_names() if xgboost: model = XGBRegressor().fit(X_train, y_train)", "y_test, cats=['Sex', 'Deck', 'Embarked'], labels=['Not survived', 'Survived']) else: explainer =", "XGBClassifier().fit(X_train, y_train) else: model = RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y:", "test_multiclass_dashboard(dash_duo): explainer = get_multiclass_explainer() db = ExplainerDashboard(explainer, title=\"testing\", responsive=False) dash_duo.start_server(db.app)", "RandomForestClassifier(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y: explainer = ClassifierExplainer( model, X_test,", "responsive=False, simple=True) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal(\"#simple-classifier-composite-title\", \"testing\", timeout=20) assert dash_duo.get_logs() == [],", "CatBoostRegressor(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7]) explainer = RegressionExplainer(model, X_cats,", "xgboost: multi_explainer = ClassifierExplainer(model, X_test, y_test, model_output='logodds', cats=['Sex', 'Deck'], labels=['Queenstown',", "= titanic_survive() train_names, test_names = titanic_names() model = CatBoostClassifier(iterations=100, verbose=0).fit(X_train,", "y_train) else: model = RandomForestRegressor(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y: reg_explainer", "include_y: explainer = ClassifierExplainer( model, X_test, y_test, cats=['Sex', 'Deck', 'Embarked'],", "y_test = titanic_fare() model = CatBoostRegressor(iterations=5, verbose=0).fit(X_train, y_train) explainer =", "titanic_names from explainerdashboard.dashboards import ExplainerDashboard def get_classification_explainer(xgboost=False, include_y=True): X_train, y_train,", "return explainer def test_classification_dashboard(dash_duo): explainer = get_classification_explainer() db = ExplainerDashboard(explainer,", "y_test, cats=[{'Gender': ['Sex_female', 'Sex_male', 'Sex_nan']}, 'Deck', 'Embarked'], labels=['Not survived', 'Survived'],", "should contain no error\" def test_classification_dashboard_no_y(dash_duo): explainer = get_classification_explainer(include_y=False) db", "import RandomForestClassifier, RandomForestRegressor from explainerdashboard.explainers import ClassifierExplainer, RegressionExplainer from explainerdashboard.datasets", "no error\" def test_xgboost_regression_dashboard(dash_duo): explainer = get_regression_explainer(xgboost=True) db = ExplainerDashboard(explainer,", "X_train, y_train, X_test, y_test = titanic_survive() train_names, test_names = titanic_names()", "from xgboost import XGBClassifier, XGBRegressor from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor", "'Embarked'], labels=['Not survived', 'Survived']) explainer.calculate_properties() return explainer def get_regression_explainer(xgboost=False, include_y=True):", "ExplainerDashboard(explainer, title=\"testing\", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal(\"h1\", \"testing\", timeout=20) assert dash_duo.get_logs() ==", "get_catboost_regressor() db = ExplainerDashboard(explainer, title=\"testing\", responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal(\"h1\", \"testing\", timeout=30)", "'Cherbourg']) multi_explainer.calculate_properties() return multi_explainer def get_catboost_classifier(): X_train, y_train, X_test, y_test", "X_train, y_train, X_test, y_test = titanic_fare() model 
= CatBoostRegressor(iterations=5, verbose=0).fit(X_train,", "no error\" def test_simple_classification_dashboard(dash_duo): explainer = get_classification_explainer() db = ExplainerDashboard(explainer,", "XGBRegressor().fit(X_train, y_train) else: model = RandomForestRegressor(n_estimators=50, max_depth=10).fit(X_train, y_train) if include_y:", "y_test, model_output='logodds', cats=['Sex', 'Deck'], labels=['Queenstown', 'Southampton', 'Cherbourg']) else: multi_explainer =", "X_cats, y_cats, idxs=X_test.index) explainer.calculate_properties(include_interactions=False) return explainer def get_catboost_regressor(): X_train, y_train,", "get_classification_explainer(xgboost=False, include_y=True): X_train, y_train, X_test, y_test = titanic_survive() if xgboost:", "y_train) explainer = RegressionExplainer(model, X_test, y_test, cats=[\"Sex\", 'Deck', 'Embarked']) X_cats,", "responsive=False) dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal(\"h1\", \"testing\", timeout=30) assert dash_duo.get_logs() == [], \"browser", "import XGBClassifier, XGBRegressor from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor from explainerdashboard.explainers", "= ClassifierExplainer( model, X_test, y_test, cats=['Sex', 'Deck', 'Embarked'], labels=['Not survived',", "= CatBoostRegressor(iterations=5, verbose=0).fit(X_cats, y_cats, cat_features=[5, 6, 7]) explainer = RegressionExplainer(model,", "test_regression_dashboard_no_y(dash_duo): explainer = get_regression_explainer(include_y=False) db = ExplainerDashboard(explainer, title=\"testing\", responsive=False) dash_duo.start_server(db.app)", "dash_duo.start_server(db.app) dash_duo.wait_for_text_to_equal(\"#simple-classifier-composite-title\", \"testing\", timeout=20) assert dash_duo.get_logs() == [], \"browser console", "ClassifierExplainer(model, X_cats, y_cats, idxs=X_test.index) explainer.calculate_properties(include_interactions=False) return explainer def get_catboost_regressor(): X_train,", "get_catboost_regressor(): X_train, y_train, X_test, y_test = titanic_fare() model = CatBoostRegressor(iterations=5,", "titanic_names() if xgboost: model = XGBClassifier().fit(X_train, y_train) else: model =", "return multi_explainer def get_catboost_classifier(): X_train, y_train, X_test, y_test = titanic_survive()" ]
[ "app if __name__ == \"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0}", "# if a > 10: # break print(\"Done\") #print(\"{0} elements", "> 10: # break print(\"Done\") #print(\"{0} elements in the dataset\".format(len(ds.)))", "glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet files in input dir {1}\".format(len(files),inputDataDir)) print(\"First", "== \"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet files in", "sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet as tfDsParquet inputDataDir = sys.argv[1]", "ds = tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels =", "mode=\"L\").save(fileName) #print(element) #print(\"sample name is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a", "= sys.argv[2] # test app if __name__ == \"__main__\": files", "if __name__ == \"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet", "input dir {1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for", "= tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels = element", "glob import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet as tfDsParquet", "sys.argv[2] # test app if __name__ == \"__main__\": files =", "tfDataSetParquet as tfDsParquet inputDataDir = sys.argv[1] outputDir = sys.argv[2] #", "import os from glob import glob import png sys.path.append(os.path.join(__file__,'..','..')) from", "= element sampleId = sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName)", "sampleId = sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName) #print(element) #print(\"sample", "element in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels = element sampleId = sampleId.decode(\"utf-8\")", "import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet as tfDsParquet inputDataDir", "__name__ == \"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet files", "\"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet files in input", "name is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a += 1 #", "parquet files in input dir {1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0])) ds", "if a > 10: # break print(\"Done\") #print(\"{0} elements in", "files in input dir {1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0])) ds =", "a > 10: # break print(\"Done\") #print(\"{0} elements in the", "as tf import sys import os from glob import glob", "tfDsParquet inputDataDir = sys.argv[1] outputDir = sys.argv[2] # test app", "in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels = element sampleId = sampleId.decode(\"utf-8\") fileName", "dir {1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for element", "png.from_array(pixels, mode=\"L\").save(fileName) 
#print(element) #print(\"sample name is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) #", "sys.argv[1] outputDir = sys.argv[2] # test app if __name__ ==", "= sys.argv[1] outputDir = sys.argv[2] # test app if __name__", "outputDir = sys.argv[2] # test app if __name__ == \"__main__\":", "= glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet files in input dir {1}\".format(len(files),inputDataDir))", "is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a += 1 # if", "= os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName) #print(element) #print(\"sample name is {0}\".format(sampleId)) #print(sampleIds.shape)", "os from glob import glob import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest", "{0} parquet files in input dir {1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0]))", "import glob import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet as", "# test app if __name__ == \"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\"))", "inputDataDir = sys.argv[1] outputDir = sys.argv[2] # test app if", "sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName) #print(element) #print(\"sample name is", "sys import os from glob import glob import png sys.path.append(os.path.join(__file__,'..','..'))", "{0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a += 1 # if a", "+= 1 # if a > 10: # break print(\"Done\")", "element sampleId = sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName) #print(element)", "#print(\"Iterating...\") sampleId,pixels = element sampleId = sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId))", "= sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName) #print(element) #print(\"sample name", "{0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels", "glob import glob import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet", "os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels, mode=\"L\").save(fileName) #print(element) #print(\"sample name is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape)", "a += 1 # if a > 10: # break", "#print(pixels.shape) # a += 1 # if a > 10:", "test app if __name__ == \"__main__\": files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found", "is {0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator(): #print(\"Iterating...\")", "import tensorflow as tf import sys import os from glob", "print(\"Found {0} parquet files in input dir {1}\".format(len(files),inputDataDir)) print(\"First is", "# a += 1 # if a > 10: #", "tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels = element sampleId", "png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import tfDataSetParquet as tfDsParquet inputDataDir =", "fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) 
png.from_array(pixels, mode=\"L\").save(fileName) #print(element) #print(\"sample name is {0}\".format(sampleId))", "from tfDataIngest import tfDataSetParquet as tfDsParquet inputDataDir = sys.argv[1] outputDir", "#print(sampleIds.shape) #print(pixels.shape) # a += 1 # if a >", "ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels = element sampleId = sampleId.decode(\"utf-8\") fileName =", "for element in ds.as_numpy_iterator(): #print(\"Iterating...\") sampleId,pixels = element sampleId =", "files = glob(os.path.join(inputDataDir,\"train*.parquet\")) print(\"Found {0} parquet files in input dir", "from glob import glob import png sys.path.append(os.path.join(__file__,'..','..')) from tfDataIngest import", "tensorflow as tf import sys import os from glob import", "import tfDataSetParquet as tfDsParquet inputDataDir = sys.argv[1] outputDir = sys.argv[2]", "tf import sys import os from glob import glob import", "{1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for element in", "#print(\"sample name is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a += 1", "import sys import os from glob import glob import png", "as tfDsParquet inputDataDir = sys.argv[1] outputDir = sys.argv[2] # test", "#print(element) #print(\"sample name is {0}\".format(sampleId)) #print(sampleIds.shape) #print(pixels.shape) # a +=", "in input dir {1}\".format(len(files),inputDataDir)) print(\"First is {0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]])", "sampleId,pixels = element sampleId = sampleId.decode(\"utf-8\") fileName = os.path.join(outputDir,\"{0}.png\".format(sampleId)) png.from_array(pixels,", "print(\"First is {0}\".format(files[0])) ds = tfDsParquet.create_parquet_dataset([files[0]]) for element in ds.as_numpy_iterator():", "tfDataIngest import tfDataSetParquet as tfDsParquet inputDataDir = sys.argv[1] outputDir =", "1 # if a > 10: # break print(\"Done\") #print(\"{0}" ]
[ "0)) return def xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def", "wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules", "class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val, output, tabs): if name", "(see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY \"\"\"", "if name == 'label': # translate & into _ as", "if compat.IS_PHOENIX: import_modules = ['import wx.adv\\n'] if compat.IS_PHOENIX: def cn(self,", "= False set_default_style = True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj)", "'%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \\ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize,", "def write_property(self, name, val, output, tabs): if name == 'label':", "return 'wx.adv.' + name return name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self,", "WITH NO WARRANTY \"\"\" import common, compat import wcodegen class", "val = val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs) return DatePickerCtrlXrcObject(obj)", "if name.startswith('wx'): return 'wx.adv.' + name[2:] elif name.startswith('EVT_'): return 'wx.adv.'", "= True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default',", "'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++', klass, CppDatePickerCtrlGenerator(klass))", "formatted items again if name.startswith('wx.'): return name if name.startswith('wx'): return", "_ as accelerator marker val2 = val.replace('&', '_') if val.count('&&')", "items again if name.startswith('wx.'): return name if name.startswith('wx'): return 'wx.adv.'", "= int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl", "name, val, output, tabs): if name == 'label': # translate", "about to be supported fully: if compat.IS_PHOENIX: import_modules = ['import", "import_modules = ['<wx/datectrl.h>'] tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s,", "name.startswith('EVT_'): return 'wx.adv.' + name return name def _prepare_tmpl_content(self, obj):", "val, output, tabs): if name == 'label': # translate &", "marker val2 = val.replace('&', '_') if val.count('&&') > 0: while", "name.startswith('wx'): return 'wx.adv.' + name[2:] elif name.startswith('EVT_'): return 'wx.adv.' +", "def initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass,", "= klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++', klass, CppDatePickerCtrlGenerator(klass)) common.register('XRC', klass,", "to depend on the code generator when Phoenix is about", "'wx.adv.' 
+ name return name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj)", "tabs) return DatePickerCtrlXrcObject(obj) def initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] =", "obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter):", "self.has_setdefault = int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>']", "2014-2016 <NAME> @copyright: 2016-2021 <NAME> @license: MIT (see LICENSE.txt) -", "'%(style)s);\\n' prefix_style = False set_default_style = True def _prepare_tmpl_content(self, obj):", "'wx.adv.' + name[2:] elif name.startswith('EVT_'): return 'wx.adv.' + name return", "= val.find('&&') if index < 0: break val = val2[:index]", "index = val.find('&&') if index < 0: break val =", "name.startswith('wx.'): return name if name.startswith('wx'): return 'wx.adv.' + name[2:] elif", "= common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val, output, tabs):", "# don't process already formatted items again if name.startswith('wx.'): return", "THIS PROGRAM COMES WITH NO WARRANTY \"\"\" import common, compat", "name if name.startswith('wx'): return 'wx.adv.' + name[2:] elif name.startswith('EVT_'): return", "= int(obj.properties.get('default', 0)) return def xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class", "CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl = '%(name)s = new %(klass)s(%(parent)s,", "& into _ as accelerator marker val2 = val.replace('&', '_')", "compat.IS_PHOENIX: import_modules = ['import wx.adv\\n'] if compat.IS_PHOENIX: def cn(self, name):", "self.has_setdefault = int(obj.properties.get('default', 0)) return def xrc_code_generator(obj): xrcgen = common.code_writers['XRC']", "obj) self.has_setdefault = int(obj.properties.get('default', 0)) return def xrc_code_generator(obj): xrcgen =", "xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val, output,", "= val.replace('&', '_') if val.count('&&') > 0: while True: index", "functions for wxDatePickerCtrl objects @copyright: 2002-2007 <NAME> @copyright: 2014-2016 <NAME>", "def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return", "klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++', klass, CppDatePickerCtrlGenerator(klass)) common.register('XRC', klass, xrc_code_generator)", "+ '&&' + val2[index+2:] else: val = val2 xrcgen.DefaultXrcObject.write_property(self, name,", "val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs) return DatePickerCtrlXrcObject(obj) def initialize():", "MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY", "COMES WITH NO WARRANTY \"\"\" import common, compat import wcodegen", "0: break val = val2[:index] + '&&' + val2[index+2:] else:", "PROGRAM COMES WITH NO WARRANTY \"\"\" import common, compat import", "False set_default_style = True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault", "wxDefaultSize, ' \\ '%(style)s);\\n' prefix_style = 
False set_default_style = True", "True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0))", "= 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++', klass,", "tabs): if name == 'label': # translate & into _", "= val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs) return DatePickerCtrlXrcObject(obj) def", "['<wx/datectrl.h>'] tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \\", "into _ as accelerator marker val2 = val.replace('&', '_') if", "<NAME> @copyright: 2014-2016 <NAME> @copyright: 2016-2021 <NAME> @license: MIT (see", "as accelerator marker val2 = val.replace('&', '_') if val.count('&&') >", "< 0: break val = val2[:index] + '&&' + val2[index+2:]", "@copyright: 2014-2016 <NAME> @copyright: 2016-2021 <NAME> @license: MIT (see LICENSE.txt)", "PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX the", "' \\ '%(style)s);\\n' prefix_style = False set_default_style = True def", "val.find('&&') if index < 0: break val = val2[:index] +", "klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++',", "tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX the following", "code generator when Phoenix is about to be supported fully:", "= ['import wx.adv\\n'] if compat.IS_PHOENIX: def cn(self, name): # don't", "name[2:] elif name.startswith('EVT_'): return 'wx.adv.' + name return name def", "- THIS PROGRAM COMES WITH NO WARRANTY \"\"\" import common,", "LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY \"\"\" import", "elif name.startswith('EVT_'): return 'wx.adv.' + name return name def _prepare_tmpl_content(self,", "Phoenix is about to be supported fully: if compat.IS_PHOENIX: import_modules", "@copyright: 2016-2021 <NAME> @license: MIT (see LICENSE.txt) - THIS PROGRAM", "_prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return def", "@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO", "val.replace('&', '_') if val.count('&&') > 0: while True: index =", "name): # don't process already formatted items again if name.startswith('wx.'):", "'label': # translate & into _ as accelerator marker val2", "common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val, output, tabs): if", "again if name.startswith('wx.'): return name if name.startswith('wx'): return 'wx.adv.' 
+", "'_') if val.count('&&') > 0: while True: index = val.find('&&')", "output, tabs): if name == 'label': # translate & into", "prefix_style = False set_default_style = True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self,", "# XXX the following needs to depend on the code", "'&&' + val2[index+2:] else: val = val2 xrcgen.DefaultXrcObject.write_property(self, name, val,", "= %(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX the following needs to depend", "_prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return class", "DatePickerCtrlXrcObject(obj) def initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python',", "name return name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault =", "+ name[2:] elif name.startswith('EVT_'): return 'wx.adv.' + name return name", "+ val2[index+2:] else: val = val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output,", "is about to be supported fully: if compat.IS_PHOENIX: import_modules =", "process already formatted items again if name.startswith('wx.'): return name if", "name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0))", "output, tabs) return DatePickerCtrlXrcObject(obj) def initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl']", "cn(self, name): # don't process already formatted items again if", "xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val,", "2016-2021 <NAME> @license: MIT (see LICENSE.txt) - THIS PROGRAM COMES", "def cn(self, name): # don't process already formatted items again", "\\ '%(style)s);\\n' prefix_style = False set_default_style = True def _prepare_tmpl_content(self,", "supported fully: if compat.IS_PHOENIX: import_modules = ['import wx.adv\\n'] if compat.IS_PHOENIX:", "return def xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self,", "# translate & into _ as accelerator marker val2 =", "depend on the code generator when Phoenix is about to", "== 'label': # translate & into _ as accelerator marker", "= new %(klass)s(%(parent)s, %(id)s, ' \\ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, '", "True: index = val.find('&&') if index < 0: break val", "2002-2007 <NAME> @copyright: 2014-2016 <NAME> @copyright: 2016-2021 <NAME> @license: MIT", "if index < 0: break val = val2[:index] + '&&'", "\"\"\" import common, compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl =", "%(id)s%(style)s)\\n' # XXX the following needs to depend on the", "wxDatePickerCtrl objects @copyright: 2002-2007 <NAME> @copyright: 2014-2016 <NAME> @copyright: 2016-2021", "for wxDatePickerCtrl objects @copyright: 2002-2007 <NAME> @copyright: 2014-2016 <NAME> @copyright:", "XXX the following needs to depend on the code generator", "name, val, output, tabs) return DatePickerCtrlXrcObject(obj) def initialize(): klass =", "class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX", "import wcodegen class 
PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n'", "'%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX the following needs to", "break val = val2[:index] + '&&' + val2[index+2:] else: val", "WARRANTY \"\"\" import common, compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl", "= ['<wx/datectrl.h>'] tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, '", "'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \\ '%(style)s);\\n' prefix_style = False set_default_style", "val = val2[:index] + '&&' + val2[index+2:] else: val =", "objects @copyright: 2002-2007 <NAME> @copyright: 2014-2016 <NAME> @copyright: 2016-2021 <NAME>", "generator when Phoenix is about to be supported fully: if", "import_modules = ['import wx.adv\\n'] if compat.IS_PHOENIX: def cn(self, name): #", "if compat.IS_PHOENIX: def cn(self, name): # don't process already formatted", "\"\"\"\\ Code generator functions for wxDatePickerCtrl objects @copyright: 2002-2007 <NAME>", "wx.adv\\n'] if compat.IS_PHOENIX: def cn(self, name): # don't process already", "= val2[:index] + '&&' + val2[index+2:] else: val = val2", "following needs to depend on the code generator when Phoenix", "when Phoenix is about to be supported fully: if compat.IS_PHOENIX:", "xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs) return DatePickerCtrlXrcObject(obj) def initialize(): klass", "= '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX the following needs", "obj) self.has_setdefault = int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules =", "%(klass)s(%(parent)s, %(id)s%(style)s)\\n' # XXX the following needs to depend on", "be supported fully: if compat.IS_PHOENIX: import_modules = ['import wx.adv\\n'] if", "if name.startswith('wx.'): return name if name.startswith('wx'): return 'wx.adv.' 
+ name[2:]", "return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl = '%(name)s =", "generator functions for wxDatePickerCtrl objects @copyright: 2002-2007 <NAME> @copyright: 2014-2016", "common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass)) common.register('C++', klass, CppDatePickerCtrlGenerator(klass)) common.register('XRC',", "val2 = val.replace('&', '_') if val.count('&&') > 0: while True:", "%(id)s, ' \\ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \\ '%(style)s);\\n' prefix_style", "set_default_style = True def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault =", "%(klass)s(%(parent)s, %(id)s, ' \\ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \\ '%(style)s);\\n'", "while True: index = val.find('&&') if index < 0: break", "0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl = '%(name)s", "the following needs to depend on the code generator when", "val, output, tabs) return DatePickerCtrlXrcObject(obj) def initialize(): klass = 'wxDatePickerCtrl'", "Code generator functions for wxDatePickerCtrl objects @copyright: 2002-2007 <NAME> @copyright:", "+ name return name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault", "int(obj.properties.get('default', 0)) return class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl =", "translate & into _ as accelerator marker val2 = val.replace('&',", "new %(klass)s(%(parent)s, %(id)s, ' \\ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \\", "on the code generator when Phoenix is about to be", "wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return def xrc_code_generator(obj): xrcgen", "<NAME> @copyright: 2016-2021 <NAME> @license: MIT (see LICENSE.txt) - THIS", "DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name, val, output, tabs): if name ==", "common, compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s =", "write_property(self, name, val, output, tabs): if name == 'label': #", "accelerator marker val2 = val.replace('&', '_') if val.count('&&') > 0:", "initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass common.register('python', klass, PythonDatePickerCtrlGenerator(klass))", "def _prepare_tmpl_content(self, obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return", "['import wx.adv\\n'] if compat.IS_PHOENIX: def cn(self, name): # don't process", "return DatePickerCtrlXrcObject(obj) def initialize(): klass = 'wxDatePickerCtrl' common.class_names['EditDatePickerCtrl'] = klass", "val2[index+2:] else: val = val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs)", "val.count('&&') > 0: while True: index = val.find('&&') if index", "class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter): import_modules = ['<wx/datectrl.h>'] tmpl = '%(name)s = new", "to be supported fully: if compat.IS_PHOENIX: import_modules = ['import wx.adv\\n']", "compat.IS_PHOENIX: def cn(self, name): # don't process already formatted items", "' \\ 'wxDefaultDateTime, 
wxDefaultPosition, wxDefaultSize, ' \\ '%(style)s);\\n' prefix_style =", "obj): wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default', 0)) return def xrc_code_generator(obj):", "NO WARRANTY \"\"\" import common, compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter):", "else: val = val2 xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs) return", "if val.count('&&') > 0: while True: index = val.find('&&') if", "the code generator when Phoenix is about to be supported", "= '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \\ 'wxDefaultDateTime, wxDefaultPosition,", "import common, compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s", "\\ 'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \\ '%(style)s);\\n' prefix_style = False", "fully: if compat.IS_PHOENIX: import_modules = ['import wx.adv\\n'] if compat.IS_PHOENIX: def", "0: while True: index = val.find('&&') if index < 0:", "val2[:index] + '&&' + val2[index+2:] else: val = val2 xrcgen.DefaultXrcObject.write_property(self,", "return 'wx.adv.' + name[2:] elif name.startswith('EVT_'): return 'wx.adv.' + name", "<NAME> @license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH", "wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\\n' #", "def xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject): def write_property(self, name,", "index < 0: break val = val2[:index] + '&&' +", "return name def _prepare_tmpl_content(self, obj): wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj) self.has_setdefault = int(obj.properties.get('default',", "int(obj.properties.get('default', 0)) return def xrc_code_generator(obj): xrcgen = common.code_writers['XRC'] class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject):", "> 0: while True: index = val.find('&&') if index <", "wxDefaultPosition, wxDefaultSize, ' \\ '%(style)s);\\n' prefix_style = False set_default_style =", "already formatted items again if name.startswith('wx.'): return name if name.startswith('wx'):", "compat import wcodegen class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter): tmpl = '%(name)s = %(klass)s(%(parent)s,", "don't process already formatted items again if name.startswith('wx.'): return name", "@copyright: 2002-2007 <NAME> @copyright: 2014-2016 <NAME> @copyright: 2016-2021 <NAME> @license:", "needs to depend on the code generator when Phoenix is", "name == 'label': # translate & into _ as accelerator", "tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \\ 'wxDefaultDateTime,", "return name if name.startswith('wx'): return 'wx.adv.' + name[2:] elif name.startswith('EVT_'):" ]
[ "import mxnet as mx from mxnet import gluon from mxnet", "ctx=ctx) validNet(net, valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx) logger.kill()", "valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i], config.final_Model_path)) data_names = ['hm36data'] label_names = ['hm36label']", "config:{}\\n'.format(pprint.pformat(config))) # define context if config.useGPU: ctx = [mx.gpu(int(i)) for", "= MeanSquareLoss() # optimizer optimizer, optimizer_params = get_optimizer(config, ctx) #", "ctx = [mx.gpu(int(i)) for i in config.gpu.split(',')] else: ctx =", "# Parse config and mkdir output logger, final_Model_path = create_logger(config)", "config.resume: ckp_path = os.path.join(config.resumeckp) net.collect_params().load(ckp_path, ctx=ctx) else: net.initialize(init=init.MSRAPrelu(), ctx=ctx) if", "valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx) logger.kill() if __name__", "mean3d, std3d) eval_metric = MPJPEMetric('valid_metric', mean3d, std3d) loss = MeanSquareLoss()", "from config import config, gen_config, update_config_from_args, s_args config = update_config_from_args(config,", "in config.gpu.split(',')] else: ctx = mx.cpu() logger.info(\"Using context:\", ctx) #", "Path:\", config.DATASET.dataset_path[i]) train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i])) valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i], config.final_Model_path)) data_names", "config = update_config_from_args(config, s_args) def main(): # Parse config and", "config.NETWORK.hybrid: net.hybridize() logger.info(net) # define loss and metric mean3d =", "update_config_from_args, s_args config = update_config_from_args(config, s_args) def main(): # Parse", "= [] valid_imdbs = [] for i in range(len(config.DATASET.train_image_set)): logger.info(\"Construct", "loss, eval_metric, epoch, config, logger=logger, ctx=ctx) logger.kill() if __name__ ==", "* from lib.core.metric import MPJPEMetric from lib.core.loss import MeanSquareLoss from", "mxnet import init from lib.core.get_optimizer import * from lib.core.metric import", "= train_data_iter.get_meanstd()['std3d'] train_metric = MPJPEMetric('train_metric', mean3d, std3d) eval_metric = MPJPEMetric('valid_metric',", "# optimizer optimizer, optimizer_params = get_optimizer(config, ctx) # train and", "= data_names, label_names=label_names, shuffle=config.TRAIN.SHUFFLE, batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger) valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1,", "define loss and metric mean3d = train_data_iter.get_meanstd()['mean3d'] std3d = train_data_iter.get_meanstd()['std3d']", "config.gpu.split(',')] else: ctx = mx.cpu() logger.info(\"Using context:\", ctx) # dataset,", "# define loss and metric mean3d = train_data_iter.get_meanstd()['mean3d'] std3d =", "config.final_Model_path)) data_names = ['hm36data'] label_names = ['hm36label'] train_data_iter = JointsDataIter(train_imdbs[0],", "epoch, config, logger=logger, ctx=ctx) logger.kill() if __name__ == '__main__': main()", "for i in config.gpu.split(',')] else: ctx = mx.cpu() logger.info(\"Using context:\",", "valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1, data_names = data_names, label_names=label_names, shuffle=False, batch_size=len(ctx)*config.TEST.batchsize,", 
"config.DATASET.dataset_path[i]) train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i])) valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i], config.final_Model_path)) data_names =", "get_net(config) if config.resume: ckp_path = os.path.join(config.resumeckp) net.collect_params().load(ckp_path, ctx=ctx) else: net.initialize(init=init.MSRAPrelu(),", "trainNet(net, trainer, train_data_iter, loss, train_metric, epoch, config, logger=logger, ctx=ctx) validNet(net,", "train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i])) valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i], config.DATASET.root_path[i], config.DATASET.dataset_path[i], config.final_Model_path)) data_names = ['hm36data']", "config and mkdir output logger, final_Model_path = create_logger(config) config.final_Model_path =", "data_names = data_names, label_names=label_names, shuffle=False, batch_size=len(ctx)*config.TEST.batchsize, logger=logger) assert train_data_iter.get_meanstd()['mean3d'].all() ==", "batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger) valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1, data_names = data_names, label_names=label_names,", "mx.io.PrefetchingIter(train_data_iter) trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params) for epoch in range(config.TRAIN.begin_epoch,", "train_metric, epoch, config, logger=logger, ctx=ctx) validNet(net, valid_data_iter, loss, eval_metric, epoch,", "= final_Model_path gen_config(os.path.join(final_Model_path, 'hyperParams.yaml')) logger.info('Training config:{}\\n'.format(pprint.pformat(config))) # define context if", "from lib.utils import * from lib.dataset.hm36 import hm36 from config", "generate trainset/ validation set train_imdbs = [] valid_imdbs = []", "shuffle=config.TRAIN.SHUFFLE, batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger) valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1, data_names = data_names,", "JointsDataIter(valid_imdbs[0], runmode=1, data_names = data_names, label_names=label_names, shuffle=False, batch_size=len(ctx)*config.TEST.batchsize, logger=logger) assert", "os.path.join(config.resumeckp) net.collect_params().load(ckp_path, ctx=ctx) else: net.initialize(init=init.MSRAPrelu(), ctx=ctx) if config.NETWORK.hybrid: net.hybridize() logger.info(net)", "logger.info(net) # define loss and metric mean3d = train_data_iter.get_meanstd()['mean3d'] std3d", "= MPJPEMetric('train_metric', mean3d, std3d) eval_metric = MPJPEMetric('valid_metric', mean3d, std3d) loss", "and mkdir output logger, final_Model_path = create_logger(config) config.final_Model_path = final_Model_path", "loss, train_metric, epoch, config, logger=logger, ctx=ctx) validNet(net, valid_data_iter, loss, eval_metric,", "s_args config = update_config_from_args(config, s_args) def main(): # Parse config", "std3d) loss = MeanSquareLoss() # optimizer optimizer, optimizer_params = get_optimizer(config,", "else: net.initialize(init=init.MSRAPrelu(), ctx=ctx) if config.NETWORK.hybrid: net.hybridize() logger.info(net) # define loss", "and metric mean3d = train_data_iter.get_meanstd()['mean3d'] std3d = train_data_iter.get_meanstd()['std3d'] train_metric =", "if config.resume: ckp_path = os.path.join(config.resumeckp) 
# lck1201/simple-effective-3Dpose-baseline
import pprint
import mxnet as mx
from mxnet import gluon
from mxnet import init

from lib.core.get_optimizer import *
from lib.core.metric import MPJPEMetric
from lib.core.loss import MeanSquareLoss
from lib.core.loader import JointsDataIter
from lib.network import get_net
from lib.net_module import *
from lib.utils import *
from lib.dataset.hm36 import hm36
from config import config, gen_config, update_config_from_args, s_args

config = update_config_from_args(config, s_args)


def main():
    # Parse config and mkdir output
    logger, final_Model_path = create_logger(config)
    config.final_Model_path = final_Model_path
    gen_config(os.path.join(final_Model_path, 'hyperParams.yaml'))
    logger.info('Training config:{}\n'.format(pprint.pformat(config)))

    # define context
    if config.useGPU:
        ctx = [mx.gpu(int(i)) for i in config.gpu.split(',')]
    else:
        ctx = mx.cpu()
    logger.info("Using context:", ctx)

    # dataset, generate trainset/ validation set
    train_imdbs = []
    valid_imdbs = []
    for i in range(len(config.DATASET.train_image_set)):
        logger.info("Construct Dataset:", config.DATASET.dbname[i],
                    ", Dataset Path:", config.DATASET.dataset_path[i])
        train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i],
                                                          config.DATASET.root_path[i],
                                                          config.DATASET.dataset_path[i]))
        valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i],
                                                          config.DATASET.root_path[i],
                                                          config.DATASET.dataset_path[i],
                                                          config.final_Model_path))

    data_names = ['hm36data']
    label_names = ['hm36label']
    train_data_iter = JointsDataIter(train_imdbs[0], runmode=0,
                                     data_names=data_names, label_names=label_names,
                                     shuffle=config.TRAIN.SHUFFLE,
                                     batch_size=len(ctx) * config.TRAIN.batchsize, logger=logger)
    valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1,
                                     data_names=data_names, label_names=label_names,
                                     shuffle=False,
                                     batch_size=len(ctx) * config.TEST.batchsize, logger=logger)
    assert train_data_iter.get_meanstd()['mean3d'].all() == valid_data_iter.get_meanstd()['mean3d'].all()

    # network
    net = get_net(config)
    if config.resume:
        ckp_path = os.path.join(config.resumeckp)
        net.collect_params().load(ckp_path, ctx=ctx)
    else:
        net.initialize(init=init.MSRAPrelu(), ctx=ctx)
    if config.NETWORK.hybrid:
        net.hybridize()
    logger.info(net)

    # define loss and metric
    mean3d = train_data_iter.get_meanstd()['mean3d']
    std3d = train_data_iter.get_meanstd()['std3d']
    train_metric = MPJPEMetric('train_metric', mean3d, std3d)
    eval_metric = MPJPEMetric('valid_metric', mean3d, std3d)
    loss = MeanSquareLoss()

    # optimizer
    optimizer, optimizer_params = get_optimizer(config, ctx)

    # train and valid
    TrainDBsize = train_data_iter.get_size()
    ValidDBsize = valid_data_iter.get_size()
    logger.info("Train DB size:", TrainDBsize, "Valid DB size:", ValidDBsize)

    if not isinstance(train_data_iter, mx.io.PrefetchingIter):
        train_data_iter = mx.io.PrefetchingIter(train_data_iter)

    trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
    for epoch in range(config.TRAIN.begin_epoch, config.TRAIN.end_epoch):
        trainNet(net, trainer, train_data_iter, loss, train_metric, epoch, config, logger=logger, ctx=ctx)
        validNet(net, valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx)

    logger.kill()


if __name__ == '__main__':
    main()
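MPJPE (mean per-joint position error) is the usual metric for this task: predictions are un-normalised with the iterator's mean3d/std3d statistics and the per-joint Euclidean distance is averaged. The sketch below is only an illustration under assumed array shapes; the function name mpjpe_sketch and the joint count are not from the repository.

import numpy as np

def mpjpe_sketch(pred_norm, label_norm, mean3d, std3d):
    """Illustrative MPJPE: un-normalise, then average per-joint Euclidean error."""
    pred = (pred_norm * std3d + mean3d).reshape(pred_norm.shape[0], -1, 3)
    label = (label_norm * std3d + mean3d).reshape(label_norm.shape[0], -1, 3)
    return float(np.mean(np.linalg.norm(pred - label, axis=-1)))

# toy check: batch of 4, 17 joints
pred = np.random.randn(4, 17 * 3)
label = np.random.randn(4, 17 * 3)
print(mpjpe_sketch(pred, label, np.zeros(17 * 3), np.ones(17 * 3)))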
[ "ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank) val_sampler =", "'selfboost': network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'minmaxent': network", "network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'sup': network", "parser.add_argument('--use-lmdb', action='store_true') args = parser.parse_args() pretrained_path = os.path.expanduser(args.pretrained_path) save_path =", "args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network = ResNet(50, frozen_stages=4) if args.task", "type=str, default=\"moco\", help=\"the pretraining models\") parser.add_argument(\"--pretrained_path\", type=str, default=\"\", help=\"the pretraining", "#args.backbone.startswith('resnet'): feature = network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature", "torch.cuda.set_device(args.local_rank) # network = ResNet(50, frozen_stages=4) if args.task == 'moco':", "os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path) data_path = os.path.expanduser(args.data_path) batch_size = args.batch_size", "cudnn.benchmark = True augmentation = transforms.Compose([ transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(),", "default=\"moco\", help=\"the pretraining models\") parser.add_argument(\"--pretrained_path\", type=str, default=\"\", help=\"the pretraining models\")", "type=str, default=\"~/ILSVRC2012/\", help=\"the data path\") parser.add_argument(\"--batch_size\", type=int, default=32, help=\"batch size\")", "output def main(): parser = argparse.ArgumentParser(\"The first stage of BoostrapSelfSup\")", "import transforms import argparse from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network,", "= network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature = [x[:,", "network = get_swav_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'selfboost': network =", "get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'simclr': network = get_simclr_network(args.backbone,", "import torch.distributed as dist from lars import * ImageFile.LOAD_TRUNCATED_IMAGES =", "bank: {} points.\".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0)", "sampler=val_sampler, pin_memory=True, num_workers=4) print(\"Initializing train memory bank: {} points.\".format(n_train_points)) train_memory_bank", "parser.add_argument(\"--pretrained_path\", type=str, default=\"\", help=\"the pretraining models\") parser.add_argument(\"--save_path\", type=str, default=\"\", help=\"where", "else: train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'),", "= img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img) else:", "torch.distributed as dist from lars import * ImageFile.LOAD_TRUNCATED_IMAGES = True", "# network = ResNet(50, frozen_stages=4) if args.task == 'moco': network", "= get_swav_network(pretrained_path, 
feature_layer=args.feature_layer) elif args.task == 'selfboost': network = get_selfboost_network(pretrained_path,", "elif args.task == 'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif", "list_file='val.lmdb', transform=augmentation) else: train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset =", "idx, img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True) img", "warnings.filterwarnings('ignore') def concat_all_gather(tensor): \"\"\" Performs all_gather operation on the provided", "DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler,", "pretrained_path, feature_layer=args.feature_layer) elif args.task == 'dino': network = get_dino_network(args.backbone, pretrained_path,", "ImageNetInstanceLMDB from torchvision import transforms import argparse from BaseTaskModel.task_network import", "'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'dino':", "os from tqdm import tqdm import torch.backends.cudnn as cudnn import", "help='node rank for distributed parallel') parser.add_argument(\"--task\", type=str, default=\"moco\", help=\"the pretraining", "args.batch_size feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network = ResNet(50,", "no gradient. \"\"\" tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]", "default='lowdim', help=\"feature layer\") parser.add_argument('--use-lmdb', action='store_true') args = parser.parse_args() pretrained_path =", "train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation) val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation)", "feature = network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature =", "for data in tqdm(train_dataloader): idx, img, _ = data idx", "'simclr': network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'sup':", "from datasets import ImageNetInstance, ImageNetInstanceLMDB from torchvision import transforms import", "network = get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'swav': network =", "memory bank: {} points.\".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0)", "transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),", "BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network from", "= args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network = ResNet(50, frozen_stages=4) if", "data in tqdm(train_dataloader): idx, img, _ = data idx =", "transforms.Compose([ transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,", "return output def main(): parser 
= argparse.ArgumentParser(\"The first stage of", "import torch.backends.cudnn as cudnn import torch from datasets import ImageNetInstance,", "Performs all_gather operation on the provided tensors. *** Warning ***:", "* ImageFile.LOAD_TRUNCATED_IMAGES = True import warnings warnings.filterwarnings('ignore') def concat_all_gather(tensor): \"\"\"", "for data in tqdm(val_dataloader): idx, img, _ = data idx", "= get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'sup': network =", "== 'swav': network = get_swav_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'selfboost':", "Warning ***: torch.distributed.all_gather has no gradient. \"\"\" tensors_gather = [torch.ones_like(tensor)", "feature = network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for x", "train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader = DataLoader(val_dataset,", "= ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler", "= get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'swav': network = get_swav_network(pretrained_path,", "has no gradient. \"\"\" tensors_gather = [torch.ones_like(tensor) for _ in", "= torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with", "= torch.cat(tensors_gather, dim=0) return output def main(): parser = argparse.ArgumentParser(\"The", "default=224, help=\"image size\") parser.add_argument(\"--feat_dim\", type=int, default=128, help=\"feat dimension\") parser.add_argument(\"--feature_layer\", type=str,", "get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network from torch.utils.data import DataLoader", "{} points.\".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for", "= os.path.expanduser(args.data_path) batch_size = args.batch_size feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank)", "= get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else: raise NotImplementedError network.cuda(args.local_rank) network =", "memory_bank\") parser.add_argument(\"--backbone\", type=str, default=\"resnet50\") parser.add_argument(\"--data_path\", type=str, default=\"~/ILSVRC2012/\", help=\"the data path\")", "<filename>FastLinear/generate_memory_bank.py import os from tqdm import tqdm import torch.backends.cudnn as", "train_memory_bank = torch.zeros(n_train_points, feat_dim).to(\"cpu\").detach() print(\"Initializing val memory bank: {} points.\".format(n_val_points))", "data in tqdm(val_dataloader): idx, img, _ = data idx =", "datasets import ImageNetInstance, ImageNetInstanceLMDB from torchvision import transforms import argparse", "argparse from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network,", "augmentation = transforms.Compose([ transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],", "pretrained_path, 
feature_layer=args.feature_layer) elif args.task == 'sup': network = get_sup_network(args.backbone, pretrained_path,", "to save the memory_bank\") parser.add_argument(\"--backbone\", type=str, default=\"resnet50\") parser.add_argument(\"--data_path\", type=str, default=\"~/ILSVRC2012/\",", "on the provided tensors. *** Warning ***: torch.distributed.all_gather has no", "parser.add_argument(\"--save_path\", type=str, default=\"\", help=\"where to save the memory_bank\") parser.add_argument(\"--backbone\", type=str,", "as cudnn import torch from datasets import ImageNetInstance, ImageNetInstanceLMDB from", "type=int, help='node rank for distributed parallel') parser.add_argument(\"--task\", type=str, default=\"moco\", help=\"the", "args.task == 'dino': network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task", "0.406], std=[0.229, 0.224, 0.225]), ]) if args.use_lmdb: train_dataset = ImageNetInstanceLMDB(root=data_path,", "DataLoader from PIL import ImageFile, Image import torch.distributed as dist", "x in feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous())", "non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature =", "interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ])", "\"\"\" tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor,", "default=128, help=\"feat dimension\") parser.add_argument(\"--feature_layer\", type=str, default='lowdim', help=\"feature layer\") parser.add_argument('--use-lmdb', action='store_true')", "transform=augmentation) else: train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path,", "transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False,", "'dino': network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'simclr':", "get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'sup': network = get_sup_network(args.backbone,", "idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True:", "cudnn import torch from datasets import ImageNetInstance, ImageNetInstanceLMDB from torchvision", "if True: #args.backbone.startswith('resnet'): feature = network(img) else: feature = network.module.get_intermediate_layers(img,", "= data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True)", "import ImageFile, Image import torch.distributed as dist from lars import", "num_workers=4) print(\"Initializing train memory bank: {} points.\".format(n_train_points)) train_memory_bank = torch.zeros(n_train_points,", "print(\"Initializing val memory bank: {} points.\".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach()", "pretraining models\") parser.add_argument(\"--save_path\", type=str, default=\"\", help=\"where to save the memory_bank\")", "torch.nn.parallel.DistributedDataParallel(network, 
device_ids=[args.local_rank]) cudnn.benchmark = True augmentation = transforms.Compose([ transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC),", "os.path.expanduser(args.data_path) batch_size = args.batch_size feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) #", "= DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4) print(\"Initializing train memory bank:", "len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader =", "help=\"batch size\") parser.add_argument(\"--img_size\", type=int, default=224, help=\"image size\") parser.add_argument(\"--feat_dim\", type=int, default=128,", "network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for x in feature]", "{'train_memory_bank': train_memory_bank, 'val_memory_bank': val_memory_bank }, args.save_path ) if __name__ ==", "first stage of BoostrapSelfSup\") parser.add_argument('--local_rank', default=-1, type=int, help='node rank for", "elif args.task == 'sup': network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else:", "args.task == 'sup': network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else: raise", "concat_all_gather(idx) with torch.no_grad(): train_memory_bank[idx,:] = feature.detach().cpu() for data in tqdm(val_dataloader):", "help=\"the pretraining models\") parser.add_argument(\"--pretrained_path\", type=str, default=\"\", help=\"the pretraining models\") parser.add_argument(\"--save_path\",", "n_val_points = len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4)", "help=\"where to save the memory_bank\") parser.add_argument(\"--backbone\", type=str, default=\"resnet50\") parser.add_argument(\"--data_path\", type=str,", "== 'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task ==", "concat_all_gather(tensor): \"\"\" Performs all_gather operation on the provided tensors. ***", "feature = [x[:, 0] for x in feature] feature =", "get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path,", "import ImageNetInstance, ImageNetInstanceLMDB from torchvision import transforms import argparse from", "shuffle=False, rank=args.local_rank) n_train_points = len(train_dataset) n_val_points = len(val_dataset) train_dataloader =", "network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else: raise NotImplementedError network.cuda(args.local_rank) network", "all_gather operation on the provided tensors. 
*** Warning ***: torch.distributed.all_gather", "type=str, default='lowdim', help=\"feature layer\") parser.add_argument('--use-lmdb', action='store_true') args = parser.parse_args() pretrained_path", "def main(): parser = argparse.ArgumentParser(\"The first stage of BoostrapSelfSup\") parser.add_argument('--local_rank',", "for x in feature] feature = torch.cat(feature, dim=-1) feature =", "feature_layer=args.feature_layer) else: raise NotImplementedError network.cuda(args.local_rank) network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank]) cudnn.benchmark", "= [x[:, 0] for x in feature] feature = torch.cat(feature,", "ResNet(50, frozen_stages=4) if args.task == 'moco': network = get_moco_network(pretrained_path, feature_layer=args.feature_layer)", "network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank]) cudnn.benchmark = True augmentation = transforms.Compose([", "n_train_points = len(train_dataset) n_val_points = len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size,", "tqdm(train_dataloader): idx, img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True)", "= ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation) val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation) else:", "def concat_all_gather(tensor): \"\"\" Performs all_gather operation on the provided tensors.", "transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if args.use_lmdb:", "= torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data in tqdm(train_dataloader):", "train memory bank: {} points.\".format(n_train_points)) train_memory_bank = torch.zeros(n_train_points, feat_dim).to(\"cpu\").detach() print(\"Initializing", "img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img)", "== 'moco': network = get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'swav':", "\"\"\" Performs all_gather operation on the provided tensors. 
*** Warning", "elif args.task == 'simclr': network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif", "= parser.parse_args() pretrained_path = os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path) data_path =", "import DataLoader from PIL import ImageFile, Image import torch.distributed as", "models\") parser.add_argument(\"--pretrained_path\", type=str, default=\"\", help=\"the pretraining models\") parser.add_argument(\"--save_path\", type=str, default=\"\",", "dimension\") parser.add_argument(\"--feature_layer\", type=str, default='lowdim', help=\"feature layer\") parser.add_argument('--use-lmdb', action='store_true') args =", "_ = data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank,", "layer\") parser.add_argument('--use-lmdb', action='store_true') args = parser.parse_args() pretrained_path = os.path.expanduser(args.pretrained_path) save_path", "from PIL import ImageFile, Image import torch.distributed as dist from", "tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return output def main():", "= get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'dino': network =", "rank=args.local_rank) n_train_points = len(train_dataset) n_val_points = len(val_dataset) train_dataloader = DataLoader(train_dataset,", "get_simclr_network, get_sup_network, get_dino_network from torch.utils.data import DataLoader from PIL import", "val memory bank: {} points.\".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval()", "from lars import * ImageFile.LOAD_TRUNCATED_IMAGES = True import warnings warnings.filterwarnings('ignore')", "elif args.task == 'selfboost': network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif args.task", "= torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank]) cudnn.benchmark = True augmentation = transforms.Compose([ transforms.Resize(int(256*args.img_size/224),", "help=\"feat dimension\") parser.add_argument(\"--feature_layer\", type=str, default='lowdim', help=\"feature layer\") parser.add_argument('--use-lmdb', action='store_true') args", "get_dino_network from torch.utils.data import DataLoader from PIL import ImageFile, Image", "feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network = ResNet(50, frozen_stages=4)", "import argparse from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network,", "= concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with torch.no_grad(): val_memory_bank[idx,:] = feature.detach().cpu()", "torch.no_grad(): val_memory_bank[idx,:] = feature.detach().cpu() if args.local_rank == 0: torch.save( {'train_memory_bank':", "data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if", "0] for x in feature] feature = torch.cat(feature, dim=-1) feature", "feature.detach().cpu() for data in tqdm(val_dataloader): idx, img, _ = data", "if args.local_rank == 0: torch.save( {'train_memory_bank': train_memory_bank, 'val_memory_bank': val_memory_bank },", "DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4) print(\"Initializing train memory bank: {}", "_ in range(torch.distributed.get_world_size())] 
torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0)", "if args.task == 'moco': network = get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task", "from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network", "argparse.ArgumentParser(\"The first stage of BoostrapSelfSup\") parser.add_argument('--local_rank', default=-1, type=int, help='node rank", "dist from lars import * ImageFile.LOAD_TRUNCATED_IMAGES = True import warnings", "args.task == 'simclr': network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task", "idx = concat_all_gather(idx) with torch.no_grad(): val_memory_bank[idx,:] = feature.detach().cpu() if args.local_rank", "stage of BoostrapSelfSup\") parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed", "= torch.zeros(n_train_points, feat_dim).to(\"cpu\").detach() print(\"Initializing val memory bank: {} points.\".format(n_val_points)) val_memory_bank", "parser.add_argument(\"--backbone\", type=str, default=\"resnet50\") parser.add_argument(\"--data_path\", type=str, default=\"~/ILSVRC2012/\", help=\"the data path\") parser.add_argument(\"--batch_size\",", "action='store_true') args = parser.parse_args() pretrained_path = os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path)", "feature = concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with torch.no_grad(): val_memory_bank[idx,:] =", "= idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'):", "network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'simclr': network", "= feature.detach().cpu() for data in tqdm(val_dataloader): idx, img, _ =", "print(\"Initializing train memory bank: {} points.\".format(n_train_points)) train_memory_bank = torch.zeros(n_train_points, feat_dim).to(\"cpu\").detach()", "default=\"\", help=\"the pretraining models\") parser.add_argument(\"--save_path\", type=str, default=\"\", help=\"where to save", "tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor, async_op=False)", "args.task == 'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task", "std=[0.229, 0.224, 0.225]), ]) if args.use_lmdb: train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb',", "== 'sup': network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else: raise NotImplementedError", "'val'), transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,", "memory bank: {} points.\".format(n_train_points)) train_memory_bank = torch.zeros(n_train_points, feat_dim).to(\"cpu\").detach() print(\"Initializing val", "0: torch.save( {'train_memory_bank': train_memory_bank, 'val_memory_bank': val_memory_bank }, args.save_path ) if", "type=str, default=\"resnet50\") parser.add_argument(\"--data_path\", type=str, default=\"~/ILSVRC2012/\", help=\"the data path\") parser.add_argument(\"--batch_size\", 
type=int,", "torch.utils.data import DataLoader from PIL import ImageFile, Image import torch.distributed", "ImageNetInstance, ImageNetInstanceLMDB from torchvision import transforms import argparse from BaseTaskModel.task_network", "transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if args.use_lmdb: train_dataset", "= len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader", "tqdm import tqdm import torch.backends.cudnn as cudnn import torch from", "get_sup_network, get_dino_network from torch.utils.data import DataLoader from PIL import ImageFile,", "get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network from torch.utils.data import DataLoader from PIL", "size\") parser.add_argument(\"--feat_dim\", type=int, default=128, help=\"feat dimension\") parser.add_argument(\"--feature_layer\", type=str, default='lowdim', help=\"feature", "= len(train_dataset) n_val_points = len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler,", "'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,", "distributed parallel') parser.add_argument(\"--task\", type=str, default=\"moco\", help=\"the pretraining models\") parser.add_argument(\"--pretrained_path\", type=str,", "feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data in tqdm(train_dataloader): idx, img,", "else: raise NotImplementedError network.cuda(args.local_rank) network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank]) cudnn.benchmark =", "= os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path) data_path = os.path.expanduser(args.data_path) batch_size =", "network = ResNet(50, frozen_stages=4) if args.task == 'moco': network =", "pretrained_path, feature_layer=args.feature_layer) elif args.task == 'simclr': network = get_simclr_network(args.backbone, pretrained_path,", "PIL import ImageFile, Image import torch.distributed as dist from lars", "img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img) else: feature", "range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return output", "default=\"resnet50\") parser.add_argument(\"--data_path\", type=str, default=\"~/ILSVRC2012/\", help=\"the data path\") parser.add_argument(\"--batch_size\", type=int, default=32,", "== 'simclr': network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task ==", "default=\"\", help=\"where to save the memory_bank\") parser.add_argument(\"--backbone\", type=str, default=\"resnet50\") parser.add_argument(\"--data_path\",", "= ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation) else: train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation)", "torch.backends.cudnn as cudnn import torch from datasets import ImageNetInstance, ImageNetInstanceLMDB", "main(): parser = argparse.ArgumentParser(\"The first stage of BoostrapSelfSup\") parser.add_argument('--local_rank', default=-1,", "train_dataset = ImageNetInstance(root=os.path.join(data_path, 
'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation)", "= True import warnings warnings.filterwarnings('ignore') def concat_all_gather(tensor): \"\"\" Performs all_gather", "pretraining models\") parser.add_argument(\"--pretrained_path\", type=str, default=\"\", help=\"the pretraining models\") parser.add_argument(\"--save_path\", type=str,", "tqdm import torch.backends.cudnn as cudnn import torch from datasets import", "network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'dino': network", "= torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank) n_train_points = len(train_dataset) n_val_points = len(val_dataset)", "= ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank) val_sampler", "feature_layer=args.feature_layer) elif args.task == 'selfboost': network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif", "pin_memory=True, num_workers=4) val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4) print(\"Initializing", "[x[:, 0] for x in feature] feature = torch.cat(feature, dim=-1)", "network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'minmaxent': network =", "transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if", "val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data in", "default=32, help=\"batch size\") parser.add_argument(\"--img_size\", type=int, default=224, help=\"image size\") parser.add_argument(\"--feat_dim\", type=int,", "transform=augmentation) val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation) else: train_dataset = ImageNetInstance(root=os.path.join(data_path,", "operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has", "*** Warning ***: torch.distributed.all_gather has no gradient. 
\"\"\" tensors_gather =", "in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return", "help=\"image size\") parser.add_argument(\"--feat_dim\", type=int, default=128, help=\"feat dimension\") parser.add_argument(\"--feature_layer\", type=str, default='lowdim',", "batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4) print(\"Initializing train memory bank: {} points.\".format(n_train_points))", "batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True,", "val_sampler.set_epoch(0) for data in tqdm(train_dataloader): idx, img, _ = data", "img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True) img =", "pretrained_path = os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path) data_path = os.path.expanduser(args.data_path) batch_size", "True import warnings warnings.filterwarnings('ignore') def concat_all_gather(tensor): \"\"\" Performs all_gather operation", "from torchvision import transforms import argparse from BaseTaskModel.task_network import get_moco_network,", "points.\".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to(\"cpu\").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data", "shuffle=False, rank=args.local_rank) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank) n_train_points = len(train_dataset)", "in feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx", "for distributed parallel') parser.add_argument(\"--task\", type=str, default=\"moco\", help=\"the pretraining models\") parser.add_argument(\"--pretrained_path\",", "data path\") parser.add_argument(\"--batch_size\", type=int, default=32, help=\"batch size\") parser.add_argument(\"--img_size\", type=int, default=224,", "ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler =", "train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data in tqdm(train_dataloader): idx, img, _ =", "rank for distributed parallel') parser.add_argument(\"--task\", type=str, default=\"moco\", help=\"the pretraining models\")", "feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx =", "async_op=False) output = torch.cat(tensors_gather, dim=0) return output def main(): parser", "lars import * ImageFile.LOAD_TRUNCATED_IMAGES = True import warnings warnings.filterwarnings('ignore') def", "= args.batch_size feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network =", "else: feature = network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for", "== 'dino': network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task ==", "save_path = os.path.expanduser(args.save_path) data_path = os.path.expanduser(args.data_path) batch_size = args.batch_size feat_dim", "default=-1, type=int, help='node rank for distributed parallel') parser.add_argument(\"--task\", type=str, default=\"moco\",", "get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'swav': network = 
import os
from tqdm import tqdm
import torch.backends.cudnn as cudnn
import torch
from datasets import ImageNetInstance, ImageNetInstanceLMDB
from torchvision import transforms
import argparse
from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network
from torch.utils.data import DataLoader
from PIL import ImageFile, Image
import torch.distributed as dist
from lars import *

ImageFile.LOAD_TRUNCATED_IMAGES = True

import warnings
warnings.filterwarnings('ignore')


def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    output = torch.cat(tensors_gather, dim=0)
    return output


def main():
    parser = argparse.ArgumentParser("The first stage of BoostrapSelfSup")
    parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed parallel')
    parser.add_argument("--task", type=str, default="moco", help="the pretraining models")
    parser.add_argument("--pretrained_path", type=str, default="", help="the pretraining models")
    parser.add_argument("--save_path", type=str, default="", help="where to save the memory_bank")
    parser.add_argument("--backbone", type=str, default="resnet50")
    parser.add_argument("--data_path", type=str, default="~/ILSVRC2012/", help="the data path")
    parser.add_argument("--batch_size", type=int, default=32, help="batch size")
    parser.add_argument("--img_size", type=int, default=224, help="image size")
    parser.add_argument("--feat_dim", type=int, default=128, help="feat dimension")
    parser.add_argument("--feature_layer", type=str, default='lowdim', help="feature layer")
    parser.add_argument('--use-lmdb', action='store_true')
    args = parser.parse_args()

    pretrained_path = os.path.expanduser(args.pretrained_path)
    save_path = os.path.expanduser(args.save_path)
    data_path = os.path.expanduser(args.data_path)
    batch_size = args.batch_size
    feat_dim = args.feat_dim

    dist.init_process_group(backend='nccl')
    torch.cuda.set_device(args.local_rank)

    # network = ResNet(50, frozen_stages=4)
    if args.task == 'moco':
        network = get_moco_network(pretrained_path, feature_layer=args.feature_layer)
    elif args.task == 'swav':
        network = get_swav_network(pretrained_path, feature_layer=args.feature_layer)
    elif args.task == 'selfboost':
        network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer)
    elif args.task == 'minmaxent':
        network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
    elif args.task == 'dino':
        network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
    elif args.task == 'simclr':
        network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
    elif args.task == 'sup':
        network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
    else:
        raise NotImplementedError
    network.cuda(args.local_rank)
    network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank])
    cudnn.benchmark = True

    augmentation = transforms.Compose([
        transforms.Resize(int(256 * args.img_size / 224), interpolation=Image.BICUBIC),
        transforms.CenterCrop(args.img_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    if args.use_lmdb:
        train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation)
        val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation)
    else:
        train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation)
        val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank)
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank)

    n_train_points = len(train_dataset)
    n_val_points = len(val_dataset)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4)

    print("Initializing train memory bank: {} points.".format(n_train_points))
    train_memory_bank = torch.zeros(n_train_points, feat_dim).to("cpu").detach()
    print("Initializing val memory bank: {} points.".format(n_val_points))
    val_memory_bank = torch.zeros(n_val_points, feat_dim).to("cpu").detach()

    network.eval()
    train_sampler.set_epoch(0)
    val_sampler.set_epoch(0)

    for data in tqdm(train_dataloader):
        idx, img, _ = data
        idx = idx.cuda(args.local_rank, non_blocking=True)
        img = img.cuda(args.local_rank, non_blocking=True)
        if True:  # args.backbone.startswith('resnet'):
            feature = network(img)
        else:
            feature = network.module.get_intermediate_layers(img, 4)
            feature = [x[:, 0] for x in feature]
            feature = torch.cat(feature, dim=-1)
        feature = concat_all_gather(feature.contiguous())
        idx = concat_all_gather(idx)
        with torch.no_grad():
            train_memory_bank[idx, :] = feature.detach().cpu()

    for data in tqdm(val_dataloader):
        idx, img, _ = data
        idx = idx.cuda(args.local_rank, non_blocking=True)
        img = img.cuda(args.local_rank, non_blocking=True)
        if True:  # args.backbone.startswith('resnet'):
            feature = network(img)
        else:
            feature = network.module.get_intermediate_layers(img, 4)
            feature = [x[:, 0] for x in feature]
            feature = torch.cat(feature, dim=-1)
        feature = concat_all_gather(feature.contiguous())
        idx = concat_all_gather(idx)
        with torch.no_grad():
            val_memory_bank[idx, :] = feature.detach().cpu()

    if args.local_rank == 0:
        torch.save({'train_memory_bank': train_memory_bank,
                    'val_memory_bank': val_memory_bank},
                   args.save_path)


if __name__ == '__main__':
    main()
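A minimal sketch of reading the saved banks back for a later stage, assuming the file was produced by the script above; "memory_bank.pth" is a hypothetical stand-in for whatever --save_path pointed at, and the dictionary keys come from the torch.save call.

import torch

banks = torch.load("memory_bank.pth", map_location="cpu")
train_bank = banks["train_memory_bank"]   # shape: (n_train_points, feat_dim)
val_bank = banks["val_memory_bank"]       # shape: (n_val_points, feat_dim)
print(train_bank.shape, val_bank.shape)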
import pytest

from rudra.utils.mercator import SimpleMercator


class TestSimpleMercator:

    pom_xml_content = """
        <project>
            <dependencies>
                <dependency>
                    <groupId>grp1.id</groupId>
                    <artifactId>art1.id</artifactId>
                </dependency>
                <dependency>
                    <groupId>grp2.id</groupId>
                    <artifactId>art2.id</artifactId>
                </dependency>
                <dependency>
                    <groupId>grp3.id</groupId>
                    <artifactId>art3.id</artifactId>
                    <scope>test</scope>
                </dependency>
            </dependencies>
        </project>
        """

    def test_get_dependencies(self):
        client = SimpleMercator(self.pom_xml_content)
        deps = client.get_dependencies()
        assert len(deps) == 3
        artifact_ids = [d.artifact_id for d in deps]
        assert not {'art1.id', 'art2.id', 'art3.id'}.difference(set(artifact_ids))
        group_ids = [d.group_id for d in deps]
        assert not {'grp1.id', 'grp2.id', 'grp3.id'}.difference(set(group_ids))
        scopes = [d.scope for d in deps]
        assert not {'compile', 'test'}.difference(set(scopes))

    def test_get_dependencies_with_no_dependencies(self):
        client = SimpleMercator('<project></project>'.encode())
        deps = client.get_dependencies()
        assert len(deps) == 0

    def test_get_dependencies_with_no_content(self):
        with pytest.raises(ValueError, match='Empty Content .*'):
            SimpleMercator('')

    def test_find_data_corrupt_pom(self):
        content = """
            </project>
            </project>
            <dependencyManagement>
                <dependencies>
                    <dependency>
                        <groupId>grp1.id</groupId>
                        <artifactId>art1.id</artifactId>
                    </dependency>
                </dependencies>
            </dependencyManagement>
            <dependencies>
                <dependency>
                    <groupId>grp1.id</groupId>
                    <artifactId>art1.id</artifactId>
                </dependency>
            </dependencies>
            </project>
            """
        client = SimpleMercator(content)
        deps = client.get_dependencies()
        assert len(deps) == 1
        artifact_ids = [d.artifact_id for d in deps]
        assert 'art1.id' in artifact_ids
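A minimal usage sketch of SimpleMercator itself, assuming only the interface the tests above exercise: the constructor accepts raw pom.xml content (str or bytes) and get_dependencies() returns objects exposing group_id, artifact_id and scope attributes. The coordinates below are made up for illustration.

from rudra.utils.mercator import SimpleMercator

pom = """
<project>
    <dependencies>
        <dependency>
            <groupId>org.example</groupId>
            <artifactId>demo-lib</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
"""

client = SimpleMercator(pom)
for dep in client.get_dependencies():
    print(dep.group_id, dep.artifact_id, dep.scope)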
import os
import sys
import unittest

from tests.tests_bin_class.test_performance import *

if __name__ == "__main__":
    unittest.main()
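For reference, a sketch that loads the same test module through unittest discovery instead of a wildcard import; the dotted module path is taken from the import above, everything else is standard library.

import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("tests.tests_bin_class.test_performance")
unittest.TextTestRunner(verbosity=2).run(suite)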
#!/usr/bin/env python

from vc3master.task import VC3Task


class CheckAllocations(VC3Task):
    '''
    Plugin to do consistency/sanity checks on Allocations.
    '''

    def runtask(self):
        '''
        '''
        self.log.info("Running task %s" % self.section)
import sys
import traceback

from django.conf import settings
from django.urls import resolve
from lxml import etree
from six.moves.urllib.request import urlopen, Request


class Client(object):
    API_URL = '%s://airbrake.io/notifier_api/v2/notices'
    ERRORS = {
        403: "Cannot use SSL",
        422: "Invalid XML sent to Airbrake",
        500: "Airbrake has braked too hard",
    }

    DEFAULTS = {
        'TIMEOUT': 5,
        'USE_SSL': False,
    }

    @property
    def url(self):
        scheme = 'http'
        if self.settings['USE_SSL']:
            scheme = 'https'
        return Client.API_URL % scheme

    @property
    def settings(self):
        if getattr(self, '_settings', None):
            return self._settings
        self._settings = Client.DEFAULTS
        self._settings.update(getattr(settings, 'AIRBRAKE', {}))
        return self._settings

    def notify(self, exception=None, request=None):
        headers = {
            'Content-Type': 'text/xml'
        }
        payload = self._generate_xml(exception=exception, request=request)
        req = Request(self.url, payload, headers)
        resp = urlopen(req, timeout=self.settings['TIMEOUT'])
        status = resp.getcode()
        if status == 200:
            return True
        elif status in Client.ERRORS:
            raise Exception(Client.ERRORS[status])

    def _generate_xml(self, exception=None, request=None):
        _, _, trace = sys.exc_info()
        notice_em = etree.Element('notice', version='2.0')
        tb = traceback.extract_tb(trace)

        api_key = etree.SubElement(notice_em, 'api-key').text = self.settings['API_KEY']

        notifier_em = etree.SubElement(notice_em, 'notifier')
        etree.SubElement(notifier_em, 'name').text = 'django-airbrake'
        etree.SubElement(notifier_em, 'version').text = '0.0.4'
        url_el = etree.SubElement(notifier_em, 'url')
        url_el.text = 'http://example.com'

        if request:
            request_em = etree.SubElement(notice_em, 'request')

            if request.is_secure():
                scheme = 'https'
            else:
                scheme = 'http'
            url = '%s://%s%s' % (scheme, request.get_host(), request.get_full_path())
            etree.SubElement(request_em, 'url').text = str(url)
            url_el.text = url

            cb, _, _ = resolve(request.path)
            etree.SubElement(request_em, 'component').text = str(cb.__module__)
            etree.SubElement(request_em, 'action').text = str(cb.__name__)

            if 'context' in self.settings:
                cgi_em = etree.SubElement(request_em, 'cgi-data')
                for key, val in list(self.settings['context'].items()):
                    var = etree.SubElement(cgi_em, 'var')
                    var.set('key', str(key))
                    var.text = str(val)

            session = list(request.session.items())
            if len(session):
                session_em = etree.SubElement(request_em, 'session')
                for key, val in session:
                    var = etree.SubElement(session_em, 'var')
                    var.set('key', str(key))
                    var.text = str(val)

        if exception:
            error_em = etree.SubElement(notice_em, 'error')
            etree.SubElement(error_em, 'class').text = str(exception.__class__.__name__)
            etree.SubElement(error_em, 'message').text = str(exception)

            backtrace_em = etree.SubElement(error_em, 'backtrace')
            for line in tb:
                etree.SubElement(backtrace_em, 'line',
                                 file=str(line[0]),
                                 number=str(line[1]),
                                 method=str(line[2]))

        env_em = etree.SubElement(notice_em, 'server-environment')
        etree.SubElement(env_em, 'environment-name').text = self.settings.get('ENVIRONMENT', 'development')

        return '<?xml version="1.0" encoding="UTF-8"?>%s' % etree.tostring(notice_em)
\"Invalid XML sent to Airbrake\", 500:", "'https' else: scheme = 'http' url = '%s://%s%s' % (scheme,", "var.set('key', str(key)) var.text = str(val) session = list(request.session.items()) if len(session):", "scheme = 'http' url = '%s://%s%s' % (scheme, request.get_host(), request.get_full_path())", "'line', file=str(line[0]), number=str(line[1]), method=str(line[2])) env_em = etree.SubElement(notice_em, 'server-environment') etree.SubElement(env_em, 'environment-name').text", "'context' in self.settings: cgi_em = etree.SubElement(request_em, 'cgi-data') for key, val", "% (scheme, request.get_host(), request.get_full_path()) etree.SubElement(request_em, 'url').text = str(url) url_el.text =", "method=str(line[2])) env_em = etree.SubElement(notice_em, 'server-environment') etree.SubElement(env_em, 'environment-name').text = self.settings.get('ENVIRONMENT', 'development')", "class Client(object): API_URL = '%s://airbrake.io/notifier_api/v2/notices' ERRORS = { 403: \"Cannot", "payload, headers) resp = urlopen(req, timeout=self.settings['TIMEOUT']) status = resp.getcode() if", "etree from six.moves.urllib.request import urlopen, Request class Client(object): API_URL =", "import etree from six.moves.urllib.request import urlopen, Request class Client(object): API_URL", "urlopen, Request class Client(object): API_URL = '%s://airbrake.io/notifier_api/v2/notices' ERRORS = {", "api_key = etree.SubElement(notice_em, 'api-key').text = self.settings['API_KEY'] notifier_em = etree.SubElement(notice_em, 'notifier')", "{ 403: \"Cannot use SSL\", 422: \"Invalid XML sent to", "DEFAULTS = { 'TIMEOUT': 5, 'USE_SSL': False, } @property def", "= resolve(request.path) etree.SubElement(request_em, 'component').text = str(cb.__module__) etree.SubElement(request_em, 'action').text = str(cb.__name__)", "session = list(request.session.items()) if len(session): session_em = etree.SubElement(request_em, 'session') for", "<gh_stars>0 import sys import traceback from django.conf import settings from", "for line in tb: etree.SubElement(backtrace_em, 'line', file=str(line[0]), number=str(line[1]), method=str(line[2])) env_em", "traceback from django.conf import settings from django.urls import resolve from", "Client.ERRORS: raise Exception(Client.ERRORS[status]) def _generate_xml(self, exception=None, request=None): _, _, trace", "list(request.session.items()) if len(session): session_em = etree.SubElement(request_em, 'session') for key, val", "six.moves.urllib.request import urlopen, Request class Client(object): API_URL = '%s://airbrake.io/notifier_api/v2/notices' ERRORS", "Client(object): API_URL = '%s://airbrake.io/notifier_api/v2/notices' ERRORS = { 403: \"Cannot use", "in session: var = etree.SubElement(session_em, 'var') var.set('key', str(key)) var.text =", "too hard\", } DEFAULTS = { 'TIMEOUT': 5, 'USE_SSL': False,", "self.settings: cgi_em = etree.SubElement(request_em, 'cgi-data') for key, val in list(self.settings['context'].items()):", "= str(exception) backtrace_em = etree.SubElement(error_em, 'backtrace') for line in tb:", "payload = self._generate_xml(exception=exception, request=request) req = Request(self.url, payload, headers) resp", "= Client.DEFAULTS self._settings.update(getattr(settings, 'AIRBRAKE', {})) return self._settings def notify(self, exception=None,", "raise Exception(Client.ERRORS[status]) def _generate_xml(self, exception=None, request=None): _, _, trace =", "= str(val) session = list(request.session.items()) if len(session): session_em = etree.SubElement(request_em,", "= 
'http://example.com' if request: request_em = etree.SubElement(notice_em, 'request') if request.is_secure():", "session: var = etree.SubElement(session_em, 'var') var.set('key', str(key)) var.text = str(val)", "'component').text = str(cb.__module__) etree.SubElement(request_em, 'action').text = str(cb.__name__) if 'context' in", "braked too hard\", } DEFAULTS = { 'TIMEOUT': 5, 'USE_SSL':", "in tb: etree.SubElement(backtrace_em, 'line', file=str(line[0]), number=str(line[1]), method=str(line[2])) env_em = etree.SubElement(notice_em,", "key, val in session: var = etree.SubElement(session_em, 'var') var.set('key', str(key))", "url(self): scheme = 'http' if self.settings['USE_SSL']: scheme = 'https' return", "self._settings.update(getattr(settings, 'AIRBRAKE', {})) return self._settings def notify(self, exception=None, request=None): headers", "self._settings = Client.DEFAULTS self._settings.update(getattr(settings, 'AIRBRAKE', {})) return self._settings def notify(self,", "req = Request(self.url, payload, headers) resp = urlopen(req, timeout=self.settings['TIMEOUT']) status", "request_em = etree.SubElement(notice_em, 'request') if request.is_secure(): scheme = 'https' else:", "@property def url(self): scheme = 'http' if self.settings['USE_SSL']: scheme =", "def url(self): scheme = 'http' if self.settings['USE_SSL']: scheme = 'https'", "{ 'Content-Type': 'text/xml' } payload = self._generate_xml(exception=exception, request=request) req =", "notify(self, exception=None, request=None): headers = { 'Content-Type': 'text/xml' } payload", "request.get_host(), request.get_full_path()) etree.SubElement(request_em, 'url').text = str(url) url_el.text = url cb,", "tb = traceback.extract_tb(trace) api_key = etree.SubElement(notice_em, 'api-key').text = self.settings['API_KEY'] notifier_em", "elif status in Client.ERRORS: raise Exception(Client.ERRORS[status]) def _generate_xml(self, exception=None, request=None):", "_ = resolve(request.path) etree.SubElement(request_em, 'component').text = str(cb.__module__) etree.SubElement(request_em, 'action').text =", "url_el.text = 'http://example.com' if request: request_em = etree.SubElement(notice_em, 'request') if", "200: return True elif status in Client.ERRORS: raise Exception(Client.ERRORS[status]) def", "return Client.API_URL % scheme @property def settings(self): if getattr(self, '_settings',", "= resp.getcode() if status == 200: return True elif status", "\"Invalid XML sent to Airbrake\", 500: \"Airbrake has braked too" ]
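# A minimal usage sketch (not part of the reconstructed module above): an
# old-style Django exception middleware that forwards unhandled view errors
# to Airbrake via Client.notify(). The import path `airbrake` and the
# middleware name are illustrative assumptions; settings.AIRBRAKE is expected
# to provide at least {'API_KEY': '...'}.
from airbrake import Client


class AirbrakeNotifierMiddleware(object):
    def __init__(self):
        self.client = Client()

    def process_exception(self, request, exception):
        try:
            self.client.notify(exception=exception, request=request)
        except Exception:
            # Reporting failures must never break the response cycle.
            pass
        return None  # fall through to Django's normal exception handling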
[ "def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return self.compute_client.snapshots.list()", "spaceone.inventory.libs.connector import AzureConnector from spaceone.inventory.error import * from spaceone.inventory.error.custom import", "spaceone.inventory.error import * from spaceone.inventory.error.custom import * __all__ = ['SnapshotConnector']", "= logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def", "self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return self.compute_client.snapshots.list() except ConnectionError: _LOGGER.error(ERROR_CONNECTOR(field='Public IP", "= ['SnapshotConnector'] _LOGGER = logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self, **kwargs):", "<reponame>jean1042/plugin-azure-cloud-services import logging from spaceone.inventory.libs.connector import AzureConnector from spaceone.inventory.error import", "import * __all__ = ['SnapshotConnector'] _LOGGER = logging.getLogger(__name__) class SnapshotConnector(AzureConnector):", "import * from spaceone.inventory.error.custom import * __all__ = ['SnapshotConnector'] _LOGGER", "import logging from spaceone.inventory.libs.connector import AzureConnector from spaceone.inventory.error import *", "['SnapshotConnector'] _LOGGER = logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs)", "from spaceone.inventory.error.custom import * __all__ = ['SnapshotConnector'] _LOGGER = logging.getLogger(__name__)", "* __all__ = ['SnapshotConnector'] _LOGGER = logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def", "* from spaceone.inventory.error.custom import * __all__ = ['SnapshotConnector'] _LOGGER =", "super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return self.compute_client.snapshots.list() except ConnectionError: _LOGGER.error(ERROR_CONNECTOR(field='Public", "import AzureConnector from spaceone.inventory.error import * from spaceone.inventory.error.custom import *", "__init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return self.compute_client.snapshots.list() except", "logging from spaceone.inventory.libs.connector import AzureConnector from spaceone.inventory.error import * from", "_LOGGER = logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data'))", "SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return", "**kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try: return self.compute_client.snapshots.list() except ConnectionError:", "from spaceone.inventory.error import * from spaceone.inventory.error.custom import * __all__ =", "AzureConnector from spaceone.inventory.error import * from spaceone.inventory.error.custom import * __all__", "class SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self): try:", "__all__ = ['SnapshotConnector'] _LOGGER = 
logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self,", "spaceone.inventory.error.custom import * __all__ = ['SnapshotConnector'] _LOGGER = logging.getLogger(__name__) class", "logging.getLogger(__name__) class SnapshotConnector(AzureConnector): def __init__(self, **kwargs): super().__init__(**kwargs) self.set_connect(kwargs.get('secret_data')) def list_snapshots(self):", "def list_snapshots(self): try: return self.compute_client.snapshots.list() except ConnectionError: _LOGGER.error(ERROR_CONNECTOR(field='Public IP Address'))", "from spaceone.inventory.libs.connector import AzureConnector from spaceone.inventory.error import * from spaceone.inventory.error.custom" ]
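# Usage sketch only: the exact keys required in secret_data are defined by
# the AzureConnector base class, so the placeholder fields below are
# assumptions, not documented parameters.
if __name__ == '__main__':
    secret_data = {
        'tenant_id': '<tenant-id>',
        'client_id': '<client-id>',
        'client_secret': '<client-secret>',
        'subscription_id': '<subscription-id>',
    }
    connector = SnapshotConnector(secret_data=secret_data)
    # list_snapshots() returns None if the connection fails, hence the `or []`.
    for snapshot in connector.list_snapshots() or []:
        print(snapshot.name)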
[ "Get a Greeter using the customer as context. Use the", "Settings(punctuation='!!') registry = setup(settings) # *** Default Customer # Make", "f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting: str = 'Bonjour'", "Settings that say what punctuation to use - Registry -", "a Greeter using the customer as context. Use the Customer", "*the*) greeter - Interact with them Simple wired application: -", "default for greeting return Greeter(punctuation=punctuation) # Register it as a", "as a container. container = registry.create_container() # Get a Greeter", "# Make a FrenchCustomer, pass into the \"greet_customer\" interaction, #", "interaction, # then test the result. french_customer = FrenchCustomer(name='Henri') assert", "from dataclasses import dataclass from wired import ServiceRegistry @dataclass class", "the default greeter, no context def default_greeter_factory(container) -> Greeter: #", "french_greeter_factory, Greeter, context=FrenchCustomer ) return registry def greet_customer(registry: ServiceRegistry, customer:", "(not *the*) greeter - Interact with them Simple wired application:", "punctuation from settings punctuation = settings.punctuation # First the default", "the French greeter, using context of FrenchCustomer def french_greeter_factory(container) ->", "say what punctuation to use - Registry - Two factories", "A customer comes in, handle the steps in the greeting", "hello, one for the FrenchCustomer context - A default Customer", "factory using its class for the \"key\", but # this", "@dataclass class Customer: name: str @dataclass class FrenchCustomer(Customer): pass @dataclass", "french_customer = FrenchCustomer(name='Henri') assert 'Bonjour Henri !!' == greet_customer(registry, french_customer)", "'Bonjour' def __call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name}", "def french_greeter_factory(container) -> Greeter: # Use the dataclass default for", "Greeter using the customer as context. Use the Customer when", "settings = Settings(punctuation='!!') registry = setup(settings) # *** Default Customer", "then test the result. french_customer = FrenchCustomer(name='Henri') assert 'Bonjour Henri", "str: return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting: str", "ServiceRegistry, customer: Customer) -> str: # A customer comes in,", "@dataclass class FrenchGreeter(Greeter): greeting: str = 'Bonjour' def __call__(self, customer:", "def __call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}'", "into a store. Do the steps to interact with them:", "no context def default_greeter_factory(container) -> Greeter: # Use the dataclass", "default Customer and FrenchCustomer \"\"\" from dataclasses import dataclass from", "greeting. 
greeter: Greeter = container.get(Greeter, context=customer) greeting = greeter(customer) return", "factory using its class for the \"key\" registry.register_factory(default_greeter_factory, Greeter) #", "context def default_greeter_factory(container) -> Greeter: # Use the dataclass default", "Make the greeter factories, using punctuation from settings punctuation =", "for greeting return FrenchGreeter(punctuation=punctuation) # Register it as a factory", "setup(settings: Settings) -> ServiceRegistry: # Make the registry registry =", "Customer # Make a Customer, pass into the \"greet_customer\" interaction,", "- A default Customer and FrenchCustomer \"\"\" from dataclasses import", "a store. Do the steps to interact with them: -", ") return registry def greet_customer(registry: ServiceRegistry, customer: Customer) -> str:", "greeting: str = 'Hello' def __call__(self, customer: Customer) -> str:", "punctuation to use - Registry - Two factories that says", "a FrenchCustomer, pass into the \"greet_customer\" interaction, # then test", "that say what punctuation to use - Registry - Two", "str = 'Hello' def __call__(self, customer: Customer) -> str: return", "# First the default greeter, no context def default_greeter_factory(container) ->", "container. container = registry.create_container() # Get a Greeter using the", "register with a \"context\" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer ) return", "- Interact with them Simple wired application: - Settings that", "# *** Default Customer # Make a Customer, pass into", "default greeter, no context def default_greeter_factory(container) -> Greeter: # Use", "= 'Bonjour' def __call__(self, customer: Customer) -> str: return f'{self.greeting}", "Get *a* (not *the*) greeter - Interact with them Simple", "test the result. customer = Customer(name='Mary') assert 'Hello Mary !!'", "using the customer as context. Use the Customer when #", "for the \"key\", but # this time register with a", "Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings: Settings)", "# *** French Customer # Make a FrenchCustomer, pass into", "Greeter) # Now the French greeter, using context of FrenchCustomer", "setup(settings) # *** Default Customer # Make a Customer, pass", "French Customer # Make a FrenchCustomer, pass into the \"greet_customer\"", "registry = ServiceRegistry() # Make the greeter factories, using punctuation", "\"key\", but # this time register with a \"context\" registry.register_factory(", "'Hello Mary !!' == greet_customer(registry, customer) # *** French Customer", "= container.get(Greeter, context=customer) greeting = greeter(customer) return greeting def main():", "walks into a store. Do the steps to interact with", "!!' 
== greet_customer(registry, customer) # *** French Customer # Make", "-> str: # A customer comes in, handle the steps", "*** French Customer # Make a FrenchCustomer, pass into the", "wired import ServiceRegistry @dataclass class Customer: name: str @dataclass class", "= Settings(punctuation='!!') registry = setup(settings) # *** Default Customer #", "return f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings: Settings) -> ServiceRegistry: #", "__call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' def", "FrenchCustomer \"\"\" from dataclasses import dataclass from wired import ServiceRegistry", "'Hello' def __call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name}", "\"\"\" A customer walks into a store. Do the steps", "steps to interact with them: - Get *a* (not *the*)", "# this time register with a \"context\" registry.register_factory( french_greeter_factory, Greeter,", "use - Registry - Two factories that says hello, one", "time register with a \"context\" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer )", "greeter(customer) return greeting def main(): settings = Settings(punctuation='!!') registry =", "== greet_customer(registry, customer) # *** French Customer # Make a", "<gh_stars>10-100 \"\"\" A customer walks into a store. Do the", "with them Simple wired application: - Settings that say what", "default_greeter_factory(container) -> Greeter: # Use the dataclass default for greeting", "the Customer when # generating the greeting. greeter: Greeter =", "class Greeter: punctuation: str greeting: str = 'Hello' def __call__(self,", "Customer when # generating the greeting. greeter: Greeter = container.get(Greeter,", "def default_greeter_factory(container) -> Greeter: # Use the dataclass default for", "{self.punctuation}' def setup(settings: Settings) -> ServiceRegistry: # Make the registry", "what punctuation to use - Registry - Two factories that", "registry def greet_customer(registry: ServiceRegistry, customer: Customer) -> str: # A", "the greeting # as a container. container = registry.create_container() #", "container = registry.create_container() # Get a Greeter using the customer", "greeting return Greeter(punctuation=punctuation) # Register it as a factory using", "the FrenchCustomer context - A default Customer and FrenchCustomer \"\"\"", "a factory using its class for the \"key\" registry.register_factory(default_greeter_factory, Greeter)", "punctuation = settings.punctuation # First the default greeter, no context", "registry registry = ServiceRegistry() # Make the greeter factories, using", "Customer and FrenchCustomer \"\"\" from dataclasses import dataclass from wired", "greeting = greeter(customer) return greeting def main(): settings = Settings(punctuation='!!')", "a Customer, pass into the \"greet_customer\" interaction, # then test", "pass @dataclass class Settings: punctuation: str @dataclass class Greeter: punctuation:", "f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings: Settings) -> ServiceRegistry: # Make", "settings punctuation = settings.punctuation # First the default greeter, no", "Settings) -> ServiceRegistry: # Make the registry registry = ServiceRegistry()", "Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter):", "the result. french_customer = FrenchCustomer(name='Henri') assert 'Bonjour Henri !!' ==", "greeting # as a container. 
container = registry.create_container() # Get", "greeting def main(): settings = Settings(punctuation='!!') registry = setup(settings) #", "result. french_customer = FrenchCustomer(name='Henri') assert 'Bonjour Henri !!' == greet_customer(registry,", "str @dataclass class Greeter: punctuation: str greeting: str = 'Hello'", "main(): settings = Settings(punctuation='!!') registry = setup(settings) # *** Default", "it as a factory using its class for the \"key\"", "= ServiceRegistry() # Make the greeter factories, using punctuation from", "return registry def greet_customer(registry: ServiceRegistry, customer: Customer) -> str: #", "store. Do the steps to interact with them: - Get", "to interact with them: - Get *a* (not *the*) greeter", "customer walks into a store. Do the steps to interact", "them Simple wired application: - Settings that say what punctuation", "into the \"greet_customer\" interaction, # then test the result. customer", "ServiceRegistry: # Make the registry registry = ServiceRegistry() # Make", "# Make the registry registry = ServiceRegistry() # Make the", "-> str: return f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings: Settings) ->", "as a factory using its class for the \"key\" registry.register_factory(default_greeter_factory,", "-> ServiceRegistry: # Make the registry registry = ServiceRegistry() #", "Greeter: # Use the dataclass default for greeting return FrenchGreeter(punctuation=punctuation)", "# Now the French greeter, using context of FrenchCustomer def", "the steps to interact with them: - Get *a* (not", "context=customer) greeting = greeter(customer) return greeting def main(): settings =", "class FrenchCustomer(Customer): pass @dataclass class Settings: punctuation: str @dataclass class", "= 'Hello' def __call__(self, customer: Customer) -> str: return f'{self.greeting}", "str @dataclass class FrenchCustomer(Customer): pass @dataclass class Settings: punctuation: str", "class Settings: punctuation: str @dataclass class Greeter: punctuation: str greeting:", "class for the \"key\" registry.register_factory(default_greeter_factory, Greeter) # Now the French", "dataclass default for greeting return FrenchGreeter(punctuation=punctuation) # Register it as", "def greet_customer(registry: ServiceRegistry, customer: Customer) -> str: # A customer", "its class for the \"key\" registry.register_factory(default_greeter_factory, Greeter) # Now the", "customer comes in, handle the steps in the greeting #", "the \"greet_customer\" interaction, # then test the result. customer =", "# generating the greeting. greeter: Greeter = container.get(Greeter, context=customer) greeting", "return Greeter(punctuation=punctuation) # Register it as a factory using its", "Customer) -> str: # A customer comes in, handle the", "return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting: str =", "using context of FrenchCustomer def french_greeter_factory(container) -> Greeter: # Use", "assert 'Hello Mary !!' == greet_customer(registry, customer) # *** French", "registry.register_factory(default_greeter_factory, Greeter) # Now the French greeter, using context of", "for the FrenchCustomer context - A default Customer and FrenchCustomer", "FrenchGreeter(Greeter): greeting: str = 'Bonjour' def __call__(self, customer: Customer) ->", "greeter: Greeter = container.get(Greeter, context=customer) greeting = greeter(customer) return greeting", "the steps in the greeting # as a container. 
container", "def setup(settings: Settings) -> ServiceRegistry: # Make the registry registry", "registry = setup(settings) # *** Default Customer # Make a", "class for the \"key\", but # this time register with", "a \"context\" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer ) return registry def", "Settings: punctuation: str @dataclass class Greeter: punctuation: str greeting: str", "its class for the \"key\", but # this time register", "punctuation: str @dataclass class Greeter: punctuation: str greeting: str =", "Make the registry registry = ServiceRegistry() # Make the greeter", "Use the dataclass default for greeting return FrenchGreeter(punctuation=punctuation) # Register", "with them: - Get *a* (not *the*) greeter - Interact", "Use the Customer when # generating the greeting. greeter: Greeter", "Greeter = container.get(Greeter, context=customer) greeting = greeter(customer) return greeting def", "Greeter, context=FrenchCustomer ) return registry def greet_customer(registry: ServiceRegistry, customer: Customer)", "@dataclass class FrenchCustomer(Customer): pass @dataclass class Settings: punctuation: str @dataclass", "Simple wired application: - Settings that say what punctuation to", "application: - Settings that say what punctuation to use -", "context=FrenchCustomer ) return registry def greet_customer(registry: ServiceRegistry, customer: Customer) ->", "Two factories that says hello, one for the FrenchCustomer context", "that says hello, one for the FrenchCustomer context - A", "Customer # Make a FrenchCustomer, pass into the \"greet_customer\" interaction,", "@dataclass class Settings: punctuation: str @dataclass class Greeter: punctuation: str", "factories that says hello, one for the FrenchCustomer context -", "Do the steps to interact with them: - Get *a*", "{customer.name} {self.punctuation}' def setup(settings: Settings) -> ServiceRegistry: # Make the", "a factory using its class for the \"key\", but #", "registry.create_container() # Get a Greeter using the customer as context.", "import ServiceRegistry @dataclass class Customer: name: str @dataclass class FrenchCustomer(Customer):", "# Use the dataclass default for greeting return FrenchGreeter(punctuation=punctuation) #", "__call__(self, customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass", "ServiceRegistry() # Make the greeter factories, using punctuation from settings", "Customer(name='Mary') assert 'Hello Mary !!' == greet_customer(registry, customer) # ***", "- Get *a* (not *the*) greeter - Interact with them", "greet_customer(registry, customer) # *** French Customer # Make a FrenchCustomer,", "= setup(settings) # *** Default Customer # Make a Customer,", "Use the dataclass default for greeting return Greeter(punctuation=punctuation) # Register", "the customer as context. 
Use the Customer when # generating", "for the \"key\" registry.register_factory(default_greeter_factory, Greeter) # Now the French greeter,", "\"key\" registry.register_factory(default_greeter_factory, Greeter) # Now the French greeter, using context", "- Registry - Two factories that says hello, one for", "\"context\" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer ) return registry def greet_customer(registry:", "FrenchCustomer def french_greeter_factory(container) -> Greeter: # Use the dataclass default", "{customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting: str = 'Bonjour' def", "steps in the greeting # as a container. container =", "# as a container. container = registry.create_container() # Get a", "greeter, no context def default_greeter_factory(container) -> Greeter: # Use the", "to use - Registry - Two factories that says hello,", "in the greeting # as a container. container = registry.create_container()", "from settings punctuation = settings.punctuation # First the default greeter,", "A customer walks into a store. Do the steps to", "\"greet_customer\" interaction, # then test the result. french_customer = FrenchCustomer(name='Henri')", "this time register with a \"context\" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer", "dataclasses import dataclass from wired import ServiceRegistry @dataclass class Customer:", "using its class for the \"key\", but # this time", "dataclass from wired import ServiceRegistry @dataclass class Customer: name: str", "# Make a Customer, pass into the \"greet_customer\" interaction, #", "using punctuation from settings punctuation = settings.punctuation # First the", "when # generating the greeting. greeter: Greeter = container.get(Greeter, context=customer)", "= settings.punctuation # First the default greeter, no context def", "Default Customer # Make a Customer, pass into the \"greet_customer\"", "pass into the \"greet_customer\" interaction, # then test the result.", "customer: Customer) -> str: # A customer comes in, handle", "FrenchGreeter(punctuation=punctuation) # Register it as a factory using its class", "# Register it as a factory using its class for", "= Customer(name='Mary') assert 'Hello Mary !!' == greet_customer(registry, customer) #", "of FrenchCustomer def french_greeter_factory(container) -> Greeter: # Use the dataclass", "from wired import ServiceRegistry @dataclass class Customer: name: str @dataclass", "= registry.create_container() # Get a Greeter using the customer as", "-> str: return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting:", "container.get(Greeter, context=customer) greeting = greeter(customer) return greeting def main(): settings", "factories, using punctuation from settings punctuation = settings.punctuation # First", "into the \"greet_customer\" interaction, # then test the result. french_customer", "French greeter, using context of FrenchCustomer def french_greeter_factory(container) -> Greeter:", "it as a factory using its class for the \"key\",", "Now the French greeter, using context of FrenchCustomer def french_greeter_factory(container)", "greeting return FrenchGreeter(punctuation=punctuation) # Register it as a factory using", "customer as context. Use the Customer when # generating the", "return FrenchGreeter(punctuation=punctuation) # Register it as a factory using its", "# then test the result. 
french_customer = FrenchCustomer(name='Henri') assert 'Bonjour", "FrenchCustomer(Customer): pass @dataclass class Settings: punctuation: str @dataclass class Greeter:", "-> Greeter: # Use the dataclass default for greeting return", "\"greet_customer\" interaction, # then test the result. customer = Customer(name='Mary')", "Customer, pass into the \"greet_customer\" interaction, # then test the", "the \"greet_customer\" interaction, # then test the result. french_customer =", "registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer ) return registry def greet_customer(registry: ServiceRegistry,", "customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' @dataclass class", "french_greeter_factory(container) -> Greeter: # Use the dataclass default for greeting", "but # this time register with a \"context\" registry.register_factory( french_greeter_factory,", "def main(): settings = Settings(punctuation='!!') registry = setup(settings) # ***", "str = 'Bonjour' def __call__(self, customer: Customer) -> str: return", "dataclass default for greeting return Greeter(punctuation=punctuation) # Register it as", "the dataclass default for greeting return FrenchGreeter(punctuation=punctuation) # Register it", "import dataclass from wired import ServiceRegistry @dataclass class Customer: name:", "greeting: str = 'Bonjour' def __call__(self, customer: Customer) -> str:", "interact with them: - Get *a* (not *the*) greeter -", "wired application: - Settings that say what punctuation to use", "context - A default Customer and FrenchCustomer \"\"\" from dataclasses", "greeter - Interact with them Simple wired application: - Settings", "the \"key\" registry.register_factory(default_greeter_factory, Greeter) # Now the French greeter, using", "= greeter(customer) return greeting def main(): settings = Settings(punctuation='!!') registry", "Mary !!' == greet_customer(registry, customer) # *** French Customer #", "then test the result. customer = Customer(name='Mary') assert 'Hello Mary", "# Make the greeter factories, using punctuation from settings punctuation", "says hello, one for the FrenchCustomer context - A default", "customer: Customer) -> str: return f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings:", "# then test the result. customer = Customer(name='Mary') assert 'Hello", "Greeter: # Use the dataclass default for greeting return Greeter(punctuation=punctuation)", "the registry registry = ServiceRegistry() # Make the greeter factories,", "interaction, # then test the result. customer = Customer(name='Mary') assert", "context. Use the Customer when # generating the greeting. greeter:", "test the result. french_customer = FrenchCustomer(name='Henri') assert 'Bonjour Henri !!'", "using its class for the \"key\" registry.register_factory(default_greeter_factory, Greeter) # Now", "Register it as a factory using its class for the", "with a \"context\" registry.register_factory( french_greeter_factory, Greeter, context=FrenchCustomer ) return registry", "result. customer = Customer(name='Mary') assert 'Hello Mary !!' 
== greet_customer(registry,", "one for the FrenchCustomer context - A default Customer and", "First the default greeter, no context def default_greeter_factory(container) -> Greeter:", "# Use the dataclass default for greeting return Greeter(punctuation=punctuation) #", "punctuation: str greeting: str = 'Hello' def __call__(self, customer: Customer)", "the greeter factories, using punctuation from settings punctuation = settings.punctuation", "Make a FrenchCustomer, pass into the \"greet_customer\" interaction, # then", "Customer: name: str @dataclass class FrenchCustomer(Customer): pass @dataclass class Settings:", "the greeting. greeter: Greeter = container.get(Greeter, context=customer) greeting = greeter(customer)", "# A customer comes in, handle the steps in the", "as a factory using its class for the \"key\", but", "str: # A customer comes in, handle the steps in", "default for greeting return FrenchGreeter(punctuation=punctuation) # Register it as a", "greeter factories, using punctuation from settings punctuation = settings.punctuation #", "greet_customer(registry: ServiceRegistry, customer: Customer) -> str: # A customer comes", "comes in, handle the steps in the greeting # as", "*a* (not *the*) greeter - Interact with them Simple wired", "- Settings that say what punctuation to use - Registry", "Greeter(punctuation=punctuation) # Register it as a factory using its class", "in, handle the steps in the greeting # as a", "a container. container = registry.create_container() # Get a Greeter using", "Greeter: punctuation: str greeting: str = 'Hello' def __call__(self, customer:", "the dataclass default for greeting return Greeter(punctuation=punctuation) # Register it", "and FrenchCustomer \"\"\" from dataclasses import dataclass from wired import", "str greeting: str = 'Hello' def __call__(self, customer: Customer) ->", "the result. customer = Customer(name='Mary') assert 'Hello Mary !!' ==", "str: return f'{self.greeting} {customer.name} {self.punctuation}' def setup(settings: Settings) -> ServiceRegistry:", "customer = Customer(name='Mary') assert 'Hello Mary !!' == greet_customer(registry, customer)", "return greeting def main(): settings = Settings(punctuation='!!') registry = setup(settings)", "for greeting return Greeter(punctuation=punctuation) # Register it as a factory", "*** Default Customer # Make a Customer, pass into the", "handle the steps in the greeting # as a container.", "Registry - Two factories that says hello, one for the", "FrenchCustomer context - A default Customer and FrenchCustomer \"\"\" from", "class Customer: name: str @dataclass class FrenchCustomer(Customer): pass @dataclass class", "generating the greeting. 
greeter: Greeter = container.get(Greeter, context=customer) greeting =", "FrenchCustomer, pass into the \"greet_customer\" interaction, # then test the", "customer) # *** French Customer # Make a FrenchCustomer, pass", "settings.punctuation # First the default greeter, no context def default_greeter_factory(container)", "greeter, using context of FrenchCustomer def french_greeter_factory(container) -> Greeter: #", "the \"key\", but # this time register with a \"context\"", "A default Customer and FrenchCustomer \"\"\" from dataclasses import dataclass", "context of FrenchCustomer def french_greeter_factory(container) -> Greeter: # Use the", "{self.punctuation}' @dataclass class FrenchGreeter(Greeter): greeting: str = 'Bonjour' def __call__(self,", "name: str @dataclass class FrenchCustomer(Customer): pass @dataclass class Settings: punctuation:", "# Get a Greeter using the customer as context. Use", "- Two factories that says hello, one for the FrenchCustomer", "Make a Customer, pass into the \"greet_customer\" interaction, # then", "them: - Get *a* (not *the*) greeter - Interact with", "ServiceRegistry @dataclass class Customer: name: str @dataclass class FrenchCustomer(Customer): pass", "Interact with them Simple wired application: - Settings that say", "\"\"\" from dataclasses import dataclass from wired import ServiceRegistry @dataclass", "@dataclass class Greeter: punctuation: str greeting: str = 'Hello' def", "as context. Use the Customer when # generating the greeting.", "class FrenchGreeter(Greeter): greeting: str = 'Bonjour' def __call__(self, customer: Customer)" ]
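# Hypothetical extension of the example above (GermanCustomer and
# GermanGreeter are illustrative names, not part of the original): any number
# of context-specific greeters can be registered under the same Greeter key.
@dataclass
class GermanCustomer(Customer):
    pass


@dataclass
class GermanGreeter(Greeter):
    greeting: str = 'Hallo'


def add_german_greeter(registry: ServiceRegistry, punctuation: str) -> None:
    def german_greeter_factory(container) -> Greeter:
        return GermanGreeter(punctuation=punctuation)

    # Same service key (Greeter), narrower context (GermanCustomer), so
    # container.get(Greeter, context=german_customer) resolves to it.
    registry.register_factory(
        german_greeter_factory, Greeter, context=GermanCustomer
    )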
[ "self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object def action(self,", "self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable & (max_row.end_time", "__init__(self, gas_field, tech_dict): \"\"\" :param gas_field: a GasField object :param", "and one or more repair methods. Each LDAR program records", "detection and repair methods in the program. The LDAR program", "type(tech.dispatch_object) is Repair: self.repair[tech_name + ' ' + tech.dispatch_object.name] =", ".repair import Repair from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class LDARProgram:", "all detection and repair methods in the program. The LDAR", "repair method :param time: the simulation time object :param gas_field:", "all of the detection methods to be employed by the", "ResultDiscrete, ResultContinuous class LDARProgram: \"\"\" An LDAR program contains one", "a GasField object :param tech_dict: a dict containing all of", "methods to be employed by the LDAR program. The dict", "ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair = {} self.repair_cost = ResultDiscrete(units='USD')", "time.current_time have been repaired. :param time: a FEAST time object", ":param time: a FEAST time object :return: None \"\"\" for", "simulation gas_field object :return: \"\"\" for i, tech in enumerate(self.tech_dict.values()):", "repair costs up to time.current_time, assuming that all reparable emissions", "LDARProgram class. \"\"\" import numpy as np import copy from", "been repaired. :param time: a FEAST time object :return: None", "between detection methods and between detection methods and repair methods", "copy from .repair import Repair from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous", "= ResultDiscrete(units='USD') for tech_name, tech in tech_dict.items(): if type(tech.dispatch_object) is", "action(self, time, gas_field): \"\"\" Runs the detect method for every", "total repair costs up to time.current_time, assuming that all reparable", "GasField object :param tech_dict: a dict containing all of the", "= ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair = {} self.repair_cost =", "and repair methods in the program. The LDAR program deploys", "\"\"\" This module defines the LDARProgram class. \"\"\" import numpy", "and repair method contained in the program. The detection and", "class LDARProgram: \"\"\" An LDAR program contains one or more", "specified for each method. \"\"\" self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries =", "the simulation time object :param gas_field: the simulation gas_field object", "i, tech in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and tech.survey_interval \\", "time.current_time, assuming that all reparable emissions that have a max", "methods of each detection and repair method contained in the", "object :return: None \"\"\" for em in self.emissions.emissions.index.unique(): empdf_temp =", "the find and repair costs associated with all detection and", "def __init__(self, gas_field, tech_dict): \"\"\" :param gas_field: a GasField object", "be defined by the dispatch_objects specified for each method. \"\"\"", "repair methods. 
Each LDAR program records the find and repair", "is Repair: self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object", "tech_dict self.repair = {} self.repair_cost = ResultDiscrete(units='USD') for tech_name, tech", "methods and one or more repair methods. Each LDAR program", "of the relationships between detection methods and between detection methods", "Calculates the total repair costs up to time.current_time, assuming that", "gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep in self.repair.values(): rep.repair(time,", "in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and tech.survey_interval \\ and np.mod(time.current_time,", "by the LDAR program. The dict must have the form", "in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0]", "\"\"\" :param gas_field: a GasField object :param tech_dict: a dict", "time object :return: None \"\"\" for em in self.emissions.emissions.index.unique(): empdf_temp", "in the program. The detection and repair methods determine their", "and between detection methods and repair methods must be defined", ":param gas_field: a GasField object :param tech_dict: a dict containing", "program. The detection and repair methods determine their own behavior", "\"\"\" self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries = []", "= tech_dict self.repair = {} self.repair_cost = ResultDiscrete(units='USD') for tech_name,", "costs associated with all detection and repair methods in the", "repair methods must be defined by the dispatch_objects specified for", "1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep in self.repair.values():", "between detection methods and repair methods must be defined by", "time object :param gas_field: the simulation gas_field object :return: \"\"\"", "as np import copy from .repair import Repair from ..EmissionSimModules.result_classes", "enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and tech.survey_interval \\ and np.mod(time.current_time, tech.survey_interval)", "deploys runs the action methods of each detection and repair", "gas_field, self.emissions.get_current_emissions(time)) for rep in self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self,", "= self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable &", "of the detection methods to be employed by the LDAR", "ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair = {}", "defines the LDARProgram class. 
\"\"\" import numpy as np import", "gas_field: the simulation gas_field object :return: \"\"\" for i, tech", "time): \"\"\" Calculates the total repair costs up to time.current_time,", "Runs the detect method for every tech in tech_dict and", "All of the relationships between detection methods and between detection", "= [] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict =", "in tech_dict.items(): if type(tech.dispatch_object) is Repair: self.repair[tech_name + ' '", "tech_dict.items(): if type(tech.dispatch_object) is Repair: self.repair[tech_name + ' ' +", ":param gas_field: the simulation gas_field object :return: \"\"\" for i,", "at each time step. \"\"\" def __init__(self, gas_field, tech_dict): \"\"\"", "method contained in the program. The detection and repair methods", "An LDAR program contains one or more detection methods and", "tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for", "step. \"\"\" def __init__(self, gas_field, tech_dict): \"\"\" :param gas_field: a", "hasattr(tech, 'survey_interval') and tech.survey_interval \\ and np.mod(time.current_time, tech.survey_interval) < time.delta_t:", "method for every tech in tech_dict and runs the repair", "a max end_time less than time.current_time have been repaired. :param", "methods determine their own behavior at each time step. \"\"\"", "max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable & (max_row.end_time <", "find and repair costs associated with all detection and repair", "dispatch_objects specified for each method. \"\"\" self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries", "methods must be defined by the dispatch_objects specified for each", "behavior at each time step. \"\"\" def __init__(self, gas_field, tech_dict):", "every tech in tech_dict and runs the repair method :param", "+ tech.dispatch_object.name] = tech.dispatch_object def action(self, time, gas_field): \"\"\" Runs", "LDAR program. The dict must have the form {\"name\": DetectionMethod}.", "repair methods in the program. The LDAR program deploys runs", "records the find and repair costs associated with all detection", "self.vents_timeseries = [] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict", "up to time.current_time, assuming that all reparable emissions that have", "runs the action methods of each detection and repair method", "self.tech_dict = tech_dict self.repair = {} self.repair_cost = ResultDiscrete(units='USD') for", "np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int)))", "the program. The detection and repair methods determine their own", "in the program. The LDAR program deploys runs the action", "and repair methods must be defined by the dispatch_objects specified", "= copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries = [] #self.emissions_results =", "#self.emissions_results = ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair", "self.repair = {} self.repair_cost = ResultDiscrete(units='USD') for tech_name, tech in", "or more repair methods. Each LDAR program records the find", "class. 
\"\"\" import numpy as np import copy from .repair", "detection methods and one or more repair methods. Each LDAR", "object :param gas_field: the simulation gas_field object :return: \"\"\" for", "that have a max end_time less than time.current_time have been", "\"\"\" for em in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row =", ":param time: the simulation time object :param gas_field: the simulation", "None \"\"\" for em in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row", "one or more repair methods. Each LDAR program records the", "with all detection and repair methods in the program. The", "must be defined by the dispatch_objects specified for each method.", "more repair methods. Each LDAR program records the find and", "action methods of each detection and repair method contained in", "have a max end_time less than time.current_time have been repaired.", "the action methods of each detection and repair method contained", "= {} self.repair_cost = ResultDiscrete(units='USD') for tech_name, tech in tech_dict.items():", "methods and between detection methods and repair methods must be", "method. \"\"\" self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries =", "associated with all detection and repair methods in the program.", "object :return: \"\"\" for i, tech in enumerate(self.tech_dict.values()): if hasattr(tech,", "tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep in self.repair.values(): rep.repair(time, self.emissions) def", "em in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time ==", ":param tech_dict: a dict containing all of the detection methods", "max end_time less than time.current_time have been repaired. :param time:", "and repair costs associated with all detection and repair methods", "methods in the program. The LDAR program deploys runs the", "gas_field: a GasField object :param tech_dict: a dict containing all", "methods and repair methods must be defined by the dispatch_objects", "repair costs associated with all detection and repair methods in", "The detection and repair methods determine their own behavior at", "ResultContinuous class LDARProgram: \"\"\" An LDAR program contains one or", "tech.dispatch_object def action(self, time, gas_field): \"\"\" Runs the detect method", "relationships between detection methods and between detection methods and repair", "each time step. \"\"\" def __init__(self, gas_field, tech_dict): \"\"\" :param", "This module defines the LDARProgram class. \"\"\" import numpy as", "time step. \"\"\" def __init__(self, gas_field, tech_dict): \"\"\" :param gas_field:", "contains one or more detection methods and one or more", "to be employed by the LDAR program. The dict must", "have been repaired. :param time: a FEAST time object :return:", "by the dispatch_objects specified for each method. \"\"\" self.emissions =", "gas_field, tech_dict): \"\"\" :param gas_field: a GasField object :param tech_dict:", "less than time.current_time have been repaired. 
:param time: a FEAST", "def calc_rep_costs(self, time): \"\"\" Calculates the total repair costs up", "LDAR program contains one or more detection methods and one", "[] self.vents_timeseries = [] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s')", "np import copy from .repair import Repair from ..EmissionSimModules.result_classes import", "detection and repair methods determine their own behavior at each", "rep in self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self, time): \"\"\" Calculates", "self.emissions_timeseries = [] self.vents_timeseries = [] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results", "a FEAST time object :return: None \"\"\" for em in", "time: a FEAST time object :return: None \"\"\" for em", "{\"name\": DetectionMethod}. All of the relationships between detection methods and", "\\ and np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1,", "ResultDiscrete(units='USD') for tech_name, tech in tech_dict.items(): if type(tech.dispatch_object) is Repair:", "<reponame>GeoSensorWebLab/FEAST_PtE \"\"\" This module defines the LDARProgram class. \"\"\" import", "tech_dict): \"\"\" :param gas_field: a GasField object :param tech_dict: a", "for em in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time", "import Repair from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class LDARProgram: \"\"\"", "the LDARProgram class. \"\"\" import numpy as np import copy", "import numpy as np import copy from .repair import Repair", "program records the find and repair costs associated with all", "the total repair costs up to time.current_time, assuming that all", "repair method contained in the program. The detection and repair", "def action(self, time, gas_field): \"\"\" Runs the detect method for", "empdf_temp = self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable", "' ' + tech.dispatch_object.name] = tech.dispatch_object def action(self, time, gas_field):", "detection methods and repair methods must be defined by the", "+ ' ' + tech.dispatch_object.name] = tech.dispatch_object def action(self, time,", "time: the simulation time object :param gas_field: the simulation gas_field", "import copy from .repair import Repair from ..EmissionSimModules.result_classes import ResultDiscrete,", "the simulation gas_field object :return: \"\"\" for i, tech in", "'survey_interval') and tech.survey_interval \\ and np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0,", "program deploys runs the action methods of each detection and", "simulation time object :param gas_field: the simulation gas_field object :return:", "than time.current_time have been repaired. :param time: a FEAST time", "each method. \"\"\" self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries", "if hasattr(tech, 'survey_interval') and tech.survey_interval \\ and np.mod(time.current_time, tech.survey_interval) <", "from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class LDARProgram: \"\"\" An LDAR", "determine their own behavior at each time step. 
\"\"\" def", "object :param tech_dict: a dict containing all of the detection", "reparable emissions that have a max end_time less than time.current_time", "the relationships between detection methods and between detection methods and", "The LDAR program deploys runs the action methods of each", "the form {\"name\": DetectionMethod}. All of the relationships between detection", "for i, tech in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and tech.survey_interval", "self.emissions.get_current_emissions(time)) for rep in self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self, time):", "detection methods and between detection methods and repair methods must", "tech_name, tech in tech_dict.items(): if type(tech.dispatch_object) is Repair: self.repair[tech_name +", "more detection methods and one or more repair methods. Each", "tech_dict: a dict containing all of the detection methods to", "the detection methods to be employed by the LDAR program.", "empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable & (max_row.end_time < time.current_time): self.repair_cost.append_entry([max_row.end_time,", "\"\"\" An LDAR program contains one or more detection methods", "must have the form {\"name\": DetectionMethod}. All of the relationships", "their own behavior at each time step. \"\"\" def __init__(self,", "= empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if max_row.reparable & (max_row.end_time < time.current_time):", "in tech_dict and runs the repair method :param time: the", "containing all of the detection methods to be employed by", "DetectionMethod}. All of the relationships between detection methods and between", "[] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict = tech_dict", "numpy as np import copy from .repair import Repair from", "Repair: self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object def", "self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]] max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0] if", "self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries = [] #self.emissions_results", "and runs the repair method :param time: the simulation time", "the repair method :param time: the simulation time object :param", "Repair from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class LDARProgram: \"\"\" An", "and np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites,", "time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time))", "own behavior at each time step. 
\"\"\" def __init__(self, gas_field,", "and tech.survey_interval \\ and np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites", "detect method for every tech in tech_dict and runs the", "< time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field,", "and repair methods determine their own behavior at each time", "to time.current_time, assuming that all reparable emissions that have a", "#self.vents_results = ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair = {} self.repair_cost", "or more detection methods and one or more repair methods.", "\"\"\" Calculates the total repair costs up to time.current_time, assuming", "FEAST time object :return: None \"\"\" for em in self.emissions.emissions.index.unique():", "defined by the dispatch_objects specified for each method. \"\"\" self.emissions", "for rep in self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self, time): \"\"\"", "the dispatch_objects specified for each method. \"\"\" self.emissions = copy.deepcopy(gas_field.emissions)", "copy.deepcopy(gas_field.emissions) self.emissions_timeseries = [] self.vents_timeseries = [] #self.emissions_results = ResultContinuous(units='g/s')", "tech in tech_dict and runs the repair method :param time:", "method :param time: the simulation time object :param gas_field: the", "tech in tech_dict.items(): if type(tech.dispatch_object) is Repair: self.repair[tech_name + '", "for each method. \"\"\" self.emissions = copy.deepcopy(gas_field.emissions) self.emissions_timeseries = []", "form {\"name\": DetectionMethod}. All of the relationships between detection methods", "\"\"\" def __init__(self, gas_field, tech_dict): \"\"\" :param gas_field: a GasField", "time, gas_field): \"\"\" Runs the detect method for every tech", ":return: \"\"\" for i, tech in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval')", "self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self, time): \"\"\" Calculates the total", "assuming that all reparable emissions that have a max end_time", "== empdf_temp.end_time.max()].iloc[0] if max_row.reparable & (max_row.end_time < time.current_time): self.repair_cost.append_entry([max_row.end_time, max_row.repair_cost])", "LDAR program records the find and repair costs associated with", "..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class LDARProgram: \"\"\" An LDAR program", "\"\"\" Runs the detect method for every tech in tech_dict", "{} self.repair_cost = ResultDiscrete(units='USD') for tech_name, tech in tech_dict.items(): if", "tech in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and tech.survey_interval \\ and", "be employed by the LDAR program. The dict must have", "that all reparable emissions that have a max end_time less", "detection methods to be employed by the LDAR program. The", "for tech_name, tech in tech_dict.items(): if type(tech.dispatch_object) is Repair: self.repair[tech_name", "module defines the LDARProgram class. \"\"\" import numpy as np", "the LDAR program. The dict must have the form {\"name\":", "methods. 
Each LDAR program records the find and repair costs", "the detect method for every tech in tech_dict and runs", "LDAR program deploys runs the action methods of each detection", "dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep in self.repair.values(): rep.repair(time, self.emissions)", "in self.repair.values(): rep.repair(time, self.emissions) def calc_rep_costs(self, time): \"\"\" Calculates the", "\"\"\" for i, tech in enumerate(self.tech_dict.values()): if hasattr(tech, 'survey_interval') and", "self.emissions) def calc_rep_costs(self, time): \"\"\" Calculates the total repair costs", "LDARProgram: \"\"\" An LDAR program contains one or more detection", "gas_field.n_sites - 1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep", "' + tech.dispatch_object.name] = tech.dispatch_object def action(self, time, gas_field): \"\"\"", "\"\"\" import numpy as np import copy from .repair import", "for every tech in tech_dict and runs the repair method", "- 1, gas_field.n_sites, dtype=int))) tech.detect(time, gas_field, self.emissions.get_current_emissions(time)) for rep in", "each detection and repair method contained in the program. The", "all reparable emissions that have a max end_time less than", "import ResultDiscrete, ResultContinuous class LDARProgram: \"\"\" An LDAR program contains", "program contains one or more detection methods and one or", "tech_dict and runs the repair method :param time: the simulation", "tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int))) tech.detect(time,", "gas_field object :return: \"\"\" for i, tech in enumerate(self.tech_dict.values()): if", "runs the repair method :param time: the simulation time object", "calc_rep_costs(self, time): \"\"\" Calculates the total repair costs up to", "tech.survey_interval \\ and np.mod(time.current_time, tech.survey_interval) < time.delta_t: tech.action(list(np.linspace(0, gas_field.n_sites -", "have the form {\"name\": DetectionMethod}. All of the relationships between", "= [] self.vents_timeseries = [] #self.emissions_results = ResultContinuous(units='g/s') #self.vents_results =", "from .repair import Repair from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous class", "one or more detection methods and one or more repair", "dict must have the form {\"name\": DetectionMethod}. All of the", "gas_field): \"\"\" Runs the detect method for every tech in", "repair methods determine their own behavior at each time step.", "self.repair_cost = ResultDiscrete(units='USD') for tech_name, tech in tech_dict.items(): if type(tech.dispatch_object)", "a dict containing all of the detection methods to be", "Each LDAR program records the find and repair costs associated", "contained in the program. The detection and repair methods determine", "= ResultContinuous(units='g/s') #self.vents_results = ResultContinuous(units='g/s') self.tech_dict = tech_dict self.repair =", "if type(tech.dispatch_object) is Repair: self.repair[tech_name + ' ' + tech.dispatch_object.name]", "costs up to time.current_time, assuming that all reparable emissions that", "employed by the LDAR program. The dict must have the", "end_time less than time.current_time have been repaired. 
:param time: a", ":return: None \"\"\" for em in self.emissions.emissions.index.unique(): empdf_temp = self.emissions.emissions.loc[[em]]", "= tech.dispatch_object def action(self, time, gas_field): \"\"\" Runs the detect", "The dict must have the form {\"name\": DetectionMethod}. All of", "repaired. :param time: a FEAST time object :return: None \"\"\"", "detection and repair method contained in the program. The detection", "of each detection and repair method contained in the program.", "program. The LDAR program deploys runs the action methods of", "emissions that have a max end_time less than time.current_time have", "program. The dict must have the form {\"name\": DetectionMethod}. All", "rep.repair(time, self.emissions) def calc_rep_costs(self, time): \"\"\" Calculates the total repair", "the program. The LDAR program deploys runs the action methods", "dict containing all of the detection methods to be employed", "tech.dispatch_object.name] = tech.dispatch_object def action(self, time, gas_field): \"\"\" Runs the" ]
[ "use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator", "y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def cycle_loss(self, y_true,", "train on real and synthetic images loss_weights_D = [0.5] #", "test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test = [] self.B_test =", "self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds real_A = Input(shape=self.image_shapeA,", "sometimes send images from B to G_A2B (and the opposite)", "compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G,", "the discriminator learning rate should be decreased self.use_patchgan = opt.use_patchgan", "self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B self.lambda_2 =", "* 1, 3) self.image_shapeB_in = (None, None, 3) # Identity", "self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2) # # =======", "1, 3) self.image_shapeA_in = (None, None, 3) if self.task ==", "self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2) # # ======= Discriminator model", "print('supervised_loss: ', self.supervised_loss) def lse(self, y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred,", "print('learning date D: ', self.learning_rate_D) print('use patchGAN: ', self.use_patchgan) print('use_identity_learning:", "might affect the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution # Supervised learning", "software; you can redistribute it and/or modify it under the", "Cycle GAN with parameters ...') print('task: ', self.task) print('generator architecture:", "avoid falsy keras error about weight descripancies self.D_A_static = Network(inputs=image_A,", "loss=compile_losses, loss_weights=compile_weights) # ======= Data ========== # Use 'None' to", "images from B to G_A2B (and the opposite) to teach", "# ============= Generator models ======================= # Do note update discriminator", "print('Data has been loaded') def load_model_and_weights(self, model, weights_path, iteration, by_name):", "else: self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 3)", "self.im_h) print('learning date G: ', self.learning_rate_G) print('learning date D: ',", "Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # =============", "= 1000 if self.use_data_generator: print('--- Using dataloader during training ---')", "not self.use_data_generator: print('Data has been loaded') def load_model_and_weights(self, model, weights_path,", "print('use patchGAN: ', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization: ', self.normalization)", "warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
#", "1000 nr_B_test_imgs = 1000 if self.use_data_generator: print('--- Using dataloader during", "synthetic images # Learning rates self.learning_rate_D = opt.lr_D self.learning_rate_G =", "= opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss = opt.supervised_loss # optimizer", "can redistribute it and/or modify it under the terms of", "', self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def lse(self, y_true, y_pred): loss", "self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A self.lambda_D =", "will be done each time the iteration number is divisable", "print('use_identity_learning: ', self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1:", "Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2)", "outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D,", "= opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1 = opt.beta_1 self.beta_2 =", "self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2)", "1000 if self.use_data_generator: print('--- Using dataloader during training ---') else:", "it will be useful, but WITHOUT ANY WARRANTY; without even", "image_A = Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A = D_A(image_A) guess_B", "# 0.5 since we train on real and synthetic images", "= opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA = (opt.im_w * 1,", "1, 3), load_training_data=True, normalization=InstanceNormalization, ): self.task = opt.task self.im_w =", "model ========== if self.generator_architecture == 'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan,", "guess_B = D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B =", "and/or modify it under the terms of the BSD 0-Clause", "print('task: ', self.task) print('generator architecture: ', self.generator_architecture) print('image width: ',", "of transpose convolution in deconvolution layers (uk) - can reduce", "str(iteration) final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self):", "load_training_data: if self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True)", "(and the opposite) to teach identity mappings self.use_identity_learning = opt.use_identity_learning", "compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model')", "self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight)", "self.img_shape[-1] # Fetch data during training instead of pre caching", "if self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model =", "= self.D_B_static(synthetic_B) reconstructed_A = 
self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs =", "use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture == 'unet_mini': self.G_A2B =", "self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss, self.lse,", "loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks to avoid falsy", "builds real_A = Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B", "Huawei Technologies Co., Ltd. All rights reserved. #This program is", "self.lambda_D = opt.lambda_D # Weight for loss from discriminator guess", "FITNESS FOR A PARTICULAR PURPOSE. # See the BSD 0-Clause", "reserved. #This program is free software; you can redistribute it", "# See the BSD 0-Clause License for more details. from", "if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE')", "number is divisable with this number # PatchGAN - if", "== 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else:", "* class CycleGAN(): def __init__(self, opt, image_shape=(256 * 1, 256", "= opt.lambda_D # Weight for loss from discriminator guess on", "#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.", "Generators if self.generator_architecture == 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA,", "self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss", "generator training self.D_A_static.trainable = False self.D_B_static.trainable = False # Generators", "free software; you can redistribute it and/or modify it under", "# # ======= Discriminator model ========== if self.generator_architecture == 'ICCV':", "train on real and synthetic images # Discriminator builds image_A", "print('beta_2: ', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss:", "self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs = [reconstructed_A,", "(None, None, 3) else: self.image_shapeB = (opt.im_w * 1, opt.im_h", "use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA,", "import * from models.unet import * from keras.engine.topology import Network", "A PARTICULAR PURPOSE. 
# See the BSD 0-Clause License for", "self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity mapping will", "self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model')", "= (None, None, 3) # Identity loss - sometimes send", "opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity mapping will be done", "self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if", "- if false the discriminator learning rate should be decreased", "', self.learning_rate_G) print('learning date D: ', self.learning_rate_D) print('use patchGAN: ',", "Technologies Co., Ltd. All rights reserved. #This program is free", "== 'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB,", "under the terms of the BSD 0-Clause License. #This program", "tf from utilities.data_loader import * class CycleGAN(): def __init__(self, opt,", "# Use Networks to avoid falsy keras error about weight", "PURPOSE. # See the BSD 0-Clause License for more details.", "if self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) #", "= (None, None, 3) else: self.image_shapeB = (opt.im_w * 1,", "', self.learning_rate_D) print('use patchGAN: ', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization:", "', self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2: ',", "= [] self.B_test = [] self.A_train = [] self.B_train =", "- can reduce checkerboard artifacts but the blurring might affect", "add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm,", "Supervised learning part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss", "use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm,", "= opt.data_root self.img_shape = image_shape self.channels = self.img_shape[-1] # Fetch", "utilities.data_loader import * class CycleGAN(): def __init__(self, opt, image_shape=(256 *", "= modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D", "========== # Use 'None' to fetch all available images nr_A_test_imgs", "if opt.clipvalue is not None: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2,", "* 1, 3) self.image_shapeA_in = (None, None, 3) if self.task", "opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size = 1 self.clipvalue = 
opt.clipvalue", "self.supervised_loss) def lse(self, y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return", "3) if self.task == 'Long2Short_raw': self.image_shapeB = (opt.im_w * 1,", "self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # ======= Data ========== # Use 'None'", "# Only store test images if opt.task == 'Vimeo2Long_SID': self.A_test,", "= opt.im_w self.im_h = opt.im_h self.data_root = opt.data_root self.img_shape =", "3) self.image_shapeA_in = (None, None, 3) if self.task == 'Long2Short_raw':", "all available images nr_A_test_imgs = 1000 nr_B_test_imgs = 1000 if", "= self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B] compile_losses", "loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks to avoid", "self.epsilon_norm = opt.epsilon_norm # self.crop_res = opt.crop_res # Resize convolution", "======= Discriminator model ========== if self.generator_architecture == 'ICCV': D_A =", "weights during generator training self.D_A_static.trainable = False self.D_B_static.trainable = False", "opt.beta_2 self.batch_size = 1 self.clipvalue = opt.clipvalue self.epsilon_norm = opt.epsilon_norm", "1, opt.im_h * 1, 1) self.image_shapeB_in = (None, None, 3)", "print('--- Using dataloader during training ---') else: print('--- Caching data", "clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G,", "* from models.unet import * from keras.engine.topology import Network import", "== 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B =", "that it will be useful, but WITHOUT ANY WARRANTY; without", "Use 'None' to fetch all available images nr_A_test_imgs = 1000", "print('lambda_D: ', self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning:", "will be useful, but WITHOUT ANY WARRANTY; without even the", "self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds real_A =", "images loss_weights_D = [0.5] # 0.5 since we train on", "self.generator_architecture = opt.generator_architecture self.use_norm = opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA", "BSD 0-Clause License. 
#This program is distributed in the hope", "if opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs,", "= self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A =", "name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # ============= Generator models", "Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue)", "3), load_training_data=True, normalization=InstanceNormalization, ): self.task = opt.task self.im_w = opt.im_w", "', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1) print('lambda_2: ',", "during training instead of pre caching all images self.use_data_generator =", "= opt.lambda_1 # Cyclic loss weight A_2_B self.lambda_2 = opt.lambda_2", "loss_weights_D = [0.5] # 0.5 since we train on real", "model.name + '_weights_epoch_' + str(iteration) final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name))", "models.unet import * from keras.engine.topology import Network import sys import", "): self.task = opt.task self.im_w = opt.im_w self.im_h = opt.im_h", "self.lse] compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if", "self.image_shapeA = (opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeA_in", "loss_weights=compile_weights) # ======= Data ========== # Use 'None' to fetch", "unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5] # 0.5 since", "model, weights_path, iteration, by_name): name = model.name + '_weights_epoch_' +", "be decreased self.use_patchgan = opt.use_patchgan self.normalization = normalization # Loss", "= [] self.A_train = [] self.B_train = [] if not", "D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True)", "print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def", "instead of pre caching all images self.use_data_generator = True self.generator_architecture", "= opt.crop_res # Resize convolution - instead of transpose convolution", "train on real and synthetic images elif self.generator_architecture == 'unet_mini':", "and synthetic images elif self.generator_architecture == 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA,", "if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D,", "self.normalization = normalization # Loss hyperparameters self.lambda_1 = opt.lambda_1 #", "reconstructed_B = self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss,", "= Adam(self.learning_rate_G, self.beta_1, self.beta_2) # # ======= Discriminator model ==========", "the BSD 0-Clause License. 
#This program is distributed in the", "= modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5] # 0.5 since", "= opt.beta_2 self.batch_size = 1 self.clipvalue = opt.clipvalue self.epsilon_norm =", "synthetic_A = self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A", "self.D_A_static.trainable = False self.D_B_static.trainable = False # Generators if self.generator_architecture", "the hope that it will be useful, but WITHOUT ANY", "dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B", "self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model')", "during generator training self.D_A_static.trainable = False self.D_B_static.trainable = False #", "y_true)) return loss def cycle_loss(self, y_true, y_pred): loss = tf.reduce_mean(tf.abs(y_pred", "= D_A(image_A) guess_B = D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model')", "use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE')", "[self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D]", "Adam from models.ICCV_architectures import * from models.unet import * from", "print('normalization: ', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1) print('lambda_2:", "def cycle_loss(self, y_true, y_pred): loss = tf.reduce_mean(tf.abs(y_pred - y_true)) return", "License. #This program is distributed in the hope that it", "= get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test = [] self.B_test = []", "parameters ...') print('task: ', self.task) print('generator architecture: ', self.generator_architecture) print('image", "and synthetic images loss_weights_D = [0.5] # 0.5 since we", "self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D)", "', self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning: ',", "self.A_train = [] self.B_train = [] if not self.use_data_generator: print('Data", "lse(self, y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def", "= normalization # Loss hyperparameters self.lambda_1 = opt.lambda_1 # Cyclic", "# Identity loss - sometimes send images from B to", "this number # PatchGAN - if false the discriminator learning", "hyperparameters self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B self.lambda_2", "WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY", "(uk) - can reduce checkerboard artifacts but the blurring might", "terms of the BSD 0-Clause License. 
#This program is distributed", "been loaded') def load_model_and_weights(self, model, weights_path, iteration, by_name): name =", "opt.lambda_2 # Cyclic loss weight B_2_A self.lambda_D = opt.lambda_D #", "'None' to fetch all available images nr_A_test_imgs = 1000 nr_B_test_imgs", "opt.lambda_1 # Cyclic loss weight A_2_B self.lambda_2 = opt.lambda_2 #", "epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G,", "= Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B) dA_guess_synthetic", "Fetch data during training instead of pre caching all images", "model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing Cycle GAN with parameters ...')", "learning rate should be decreased self.use_patchgan = opt.use_patchgan self.normalization =", "normalization=InstanceNormalization, ): self.task = opt.task self.im_w = opt.im_w self.im_h =", "if self.use_data_generator: print('--- Using dataloader during training ---') else: print('---", "def lse(self, y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss", "rates self.learning_rate_D = opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1 = opt.beta_1", "the iteration number is divisable with this number # PatchGAN", "with this number # PatchGAN - if false the discriminator", "by_name=by_name) def print_info(self): print('fInitializing Cycle GAN with parameters ...') print('task:", "'{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing Cycle GAN with parameters", "about weight descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static =", "use_patchgan=self.use_patchgan) loss_weights_D = [0.5] # 0.5 since we train on", "import Adam from models.ICCV_architectures import * from models.unet import *", "training self.D_A_static.trainable = False self.D_B_static.trainable = False # Generators if", "self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss ==", "from keras.engine.topology import Network import sys import tensorflow as tf", "import * from keras.engine.topology import Network import sys import tensorflow", "self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # ============= Generator models =======================", "images nr_A_test_imgs = 1000 nr_B_test_imgs = 1000 if self.use_data_generator: print('---", "teach identity mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus #", "for loss from discriminator guess on synthetic images # Learning", "ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or", "load_model_and_weights(self, model, weights_path, iteration, by_name): name = model.name + '_weights_epoch_'", "self.learning_rate_G) print('learning date D: ', self.learning_rate_D) print('use patchGAN: ', self.use_patchgan)", "patchGAN: ', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus:", 
"reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B]", "= (opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeA_in =", "optimizer if opt.clipvalue is not None: self.opt_D = Adam(self.learning_rate_D, self.beta_1,", "= [0.5] # 0.5 since we train on real and", "modify it under the terms of the BSD 0-Clause License.", "sys.stdout.flush() if load_training_data: if self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size,", "images # Learning rates self.learning_rate_D = opt.lr_D self.learning_rate_G = opt.lr_G", "artifacts but the blurring might affect the cycle-consistency self.use_resize_convolution =", "from keras.optimizers import Adam from models.ICCV_architectures import * from models.unet", "os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing Cycle GAN", "learning part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss =", "(opt.im_w * 1, opt.im_h * 1, 1) self.image_shapeB_in = (None,", "# Use 'None' to fetch all available images nr_A_test_imgs =", "self.supervised_loss = opt.supervised_loss # optimizer if opt.clipvalue is not None:", "images if opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names =", "print('beta_1: ', self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight:", "== 'Long2Short_raw': self.image_shapeB = (opt.im_w * 1, opt.im_h * 1,", "time the iteration number is divisable with this number #", "# Discriminator builds image_A = Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A", "= model.name + '_weights_epoch_' + str(iteration) final_path = os.path.join(root, weights_path,", "fetch all available images nr_A_test_imgs = 1000 nr_B_test_imgs = 1000", "= opt.use_resize_convolution # Supervised learning part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight", "if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds real_A", "epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5] # 0.5 since we train", "self.image_shapeA_in = (None, None, 3) if self.task == 'Long2Short_raw': self.image_shapeB", "self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1) print('lambda_2: ', self.lambda_2)", "compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights)", "self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks to avoid falsy keras", "256 * 1, 3), load_training_data=True, normalization=InstanceNormalization, ): self.task = opt.task", "in deconvolution layers (uk) - can reduce checkerboard artifacts but", "on real and synthetic images # Discriminator builds image_A =", "real and synthetic images elif self.generator_architecture == 'unet_mini': D_A =", "real and synthetic images # Discriminator builds image_A = Input(self.image_shapeA)", "self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 3) 
self.image_shapeB_in", "modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB,", "- sometimes send images from B to G_A2B (and the", "'_weights_epoch_' + str(iteration) final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name)", "Adam(self.learning_rate_G, self.beta_1, self.beta_2) # # ======= Discriminator model ========== if", "= self.img_shape[-1] # Fetch data during training instead of pre", "== 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A", "== 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv,", "is free software; you can redistribute it and/or modify it", "synthetic images loss_weights_D = [0.5] # 0.5 since we train", "else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks", "Identity mapping will be done each time the iteration number", "None, 3) if self.task == 'Long2Short_raw': self.image_shapeB = (opt.im_w *", "G: ', self.learning_rate_G) print('learning date D: ', self.learning_rate_D) print('use patchGAN:", "', self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1: ',", "to teach identity mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus", "self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def lse(self, y_true,", "it and/or modify it under the terms of the BSD", "training ---') else: print('--- Caching data ---') sys.stdout.flush() if load_training_data:", "self.clipvalue = opt.clipvalue self.epsilon_norm = opt.epsilon_norm # self.crop_res = opt.crop_res", "self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test = []", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
# See the", "= opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size = 1 self.clipvalue =", "loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use", "opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA = (opt.im_w * 1, opt.im_h", "Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse,", "discriminator weights during generator training self.D_A_static.trainable = False self.D_B_static.trainable =", "redistribute it and/or modify it under the terms of the", "Loss hyperparameters self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B", "[0.5] # 0.5 since we train on real and synthetic", "cycle-consistency self.use_resize_convolution = opt.use_resize_convolution # Supervised learning part self.use_supervised_learning =", "reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights = [self.lambda_1,", "self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D)", "opposite) to teach identity mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus =", "= [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A)", "iteration number is divisable with this number # PatchGAN -", "all images self.use_data_generator = True self.generator_architecture = opt.generator_architecture self.use_norm =", "add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') #", "Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # ======= Data", "self.D_B_static.trainable = False # Generators if self.generator_architecture == 'ICCV': self.G_A2B", "data ---') sys.stdout.flush() if load_training_data: if self.use_data_generator: self.data_generator = load_data(task=self.task,", "is divisable with this number # PatchGAN - if false", "self.image_shapeB_in = (None, None, 3) # Identity loss - sometimes", "since we train on real and synthetic images # Discriminator", "- instead of transpose convolution in deconvolution layers (uk) -", "keras.engine.topology import Network import sys import tensorflow as tf from", "======================= # Do note update discriminator weights during generator training", "more details. from keras.optimizers import Adam from models.ICCV_architectures import *", "2020. Huawei Technologies Co., Ltd. All rights reserved. 
#This program", "keras.optimizers import Adam from models.ICCV_architectures import * from models.unet import", "self.beta_1 = opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size = 1 self.clipvalue", "self.image_shapeB_in = (None, None, 3) else: self.image_shapeB = (opt.im_w *", "convolution - instead of transpose convolution in deconvolution layers (uk)", "else: self.A_test = [] self.B_test = [] self.A_train = []", "self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def lse(self, y_true, y_pred): loss =", "PatchGAN - if false the discriminator learning rate should be", "self.beta_1, self.beta_2) # # ======= Discriminator model ========== if self.generator_architecture", "weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing Cycle GAN with", "= opt.supervised_loss # optimizer if opt.clipvalue is not None: self.opt_D", "as tf from utilities.data_loader import * class CycleGAN(): def __init__(self,", "', self.supervised_loss) def lse(self, y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true))", "self.use_data_generator = True self.generator_architecture = opt.generator_architecture self.use_norm = opt.use_norm self.add_extra_conv", "opt.use_resize_convolution # Supervised learning part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight =", "caching all images self.use_data_generator = True self.generator_architecture = opt.generator_architecture self.use_norm", "1, opt.im_h * 1, 3) self.image_shapeB_in = (None, None, 3)", "if self.generator_architecture == 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB,", "modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5] # 0.5 since we", "root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only store test images if", "Identity loss - sometimes send images from B to G_A2B", "self.beta_2 = opt.beta_2 self.batch_size = 1 self.clipvalue = opt.clipvalue self.epsilon_norm", "Discriminator model ========== if self.generator_architecture == 'ICCV': D_A = modelDiscriminator(self.image_shapeA,", "training instead of pre caching all images self.use_data_generator = True", "= Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D,", "the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution # Supervised learning part self.use_supervised_learning", "on synthetic images # Learning rates self.learning_rate_D = opt.lr_D self.learning_rate_G", "'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan,", "we train on real and synthetic images loss_weights_D = [0.5]", "print('--- Caching data ---') sys.stdout.flush() if load_training_data: if self.use_data_generator: self.data_generator", "images # Discriminator builds image_A = Input(self.image_shapeA) image_B = Input(self.image_shapeB)", "#This program is distributed in the hope that it will", "# optimizer if opt.clipvalue is not None: self.opt_D = Adam(self.learning_rate_D,", "= True self.generator_architecture = opt.generator_architecture self.use_norm = opt.use_norm 
self.add_extra_conv =", "hope that it will be useful, but WITHOUT ANY WARRANTY;", "self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7,", "3) self.image_shapeB_in = (None, None, 3) # Identity loss -", "= opt.supervised_weight self.supervised_loss = opt.supervised_loss # optimizer if opt.clipvalue is", "= False self.D_B_static.trainable = False # Generators if self.generator_architecture ==", "synthetic_B = self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic", "self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2) # # ======= Discriminator", "program is distributed in the hope that it will be", "use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5] # 0.5 since we train", "synthetic images # Discriminator builds image_A = Input(self.image_shapeA) image_B =", "= Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2) #", "nr_A_test_imgs = 1000 nr_B_test_imgs = 1000 if self.use_data_generator: print('--- Using", "* 1, opt.im_h * 1, 3) self.image_shapeA_in = (None, None,", "print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D:", "= [] self.B_train = [] if not self.use_data_generator: print('Data has", "import * class CycleGAN(): def __init__(self, opt, image_shape=(256 * 1,", "self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2: ', self.beta_2)", "', self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ',", "import sys import tensorflow as tf from utilities.data_loader import *", "self.lse, self.lse] compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic)", "loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks to avoid falsy keras error", "height: ', self.im_h) print('learning date G: ', self.learning_rate_G) print('learning date", "cycle_loss(self, y_true, y_pred): loss = tf.reduce_mean(tf.abs(y_pred - y_true)) return loss", "self.generator_architecture == 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm,", "---') sys.stdout.flush() if load_training_data: if self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root,", "'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB,", "self.add_extra_conv = opt.add_extra_conv self.image_shapeA = (opt.im_w * 1, opt.im_h *", "guess_A = D_A(image_A) guess_B = D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A,", "# Cyclic loss weight B_2_A self.lambda_D = opt.lambda_D # Weight", "= 1 self.clipvalue = opt.clipvalue self.epsilon_norm = opt.epsilon_norm # self.crop_res", "part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss = opt.supervised_loss", "opt.data_root self.img_shape = image_shape 
self.channels = self.img_shape[-1] # Fetch data", "self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B)", "opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs)", "= Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2,", "from models.unet import * from keras.engine.topology import Network import sys", "= opt.generator_architecture self.use_norm = opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA =", "Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2) # #", "output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G,", "data during training instead of pre caching all images self.use_data_generator", "0.5 since we train on real and synthetic images #", "final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing", "print('lambda_1: ', self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1:", "self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else:", "descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B,", "loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) #", "output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif", "loss from discriminator guess on synthetic images # Learning rates", "of pre caching all images self.use_data_generator = True self.generator_architecture =", "the BSD 0-Clause License for more details. 
from keras.optimizers import", "with parameters ...') print('task: ', self.task) print('generator architecture: ', self.generator_architecture)", "since we train on real and synthetic images loss_weights_D =", "from B to G_A2B (and the opposite) to teach identity", "elif self.generator_architecture == 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm,", "to avoid falsy keras error about weight descripancies self.D_A_static =", "if not self.use_data_generator: print('Data has been loaded') def load_model_and_weights(self, model,", "epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization,", "name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution,", "name='D_B_static_model') # ============= Generator models ======================= # Do note update", "1, 1) self.image_shapeB_in = (None, None, 3) else: self.image_shapeB =", "use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5] # 0.5 since we", "program is free software; you can redistribute it and/or modify", "Input(self.image_shapeB) guess_A = D_A(image_A) guess_B = D_B(image_B) self.D_A = Model(inputs=image_A,", "...') print('task: ', self.task) print('generator architecture: ', self.generator_architecture) print('image width:", "= self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B =", "opt.im_h self.data_root = opt.data_root self.img_shape = image_shape self.channels = self.img_shape[-1]", "self.B_train = [] if not self.use_data_generator: print('Data has been loaded')", "tensorflow as tf from utilities.data_loader import * class CycleGAN(): def", "disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5] #", "self.task) print('generator architecture: ', self.generator_architecture) print('image width: ', self.im_w) print('image", "(opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeA_in = (None,", "compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses,", "self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) #", "self.learning_rate_G = opt.lr_G self.beta_1 = opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size", "details. 
from keras.optimizers import Adam from models.ICCV_architectures import * from", "real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B)", "= self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss,", "reduce checkerboard artifacts but the blurring might affect the cycle-consistency", "# Fetch data during training instead of pre caching all", "__init__(self, opt, image_shape=(256 * 1, 256 * 1, 3), load_training_data=True,", "= D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B,", "weights_path, iteration, by_name): name = model.name + '_weights_epoch_' + str(iteration)", "self.im_w) print('image height: ', self.im_h) print('learning date G: ', self.learning_rate_G)", "= False # Generators if self.generator_architecture == 'ICCV': self.G_A2B =", "error about weight descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static", "0.5 since we train on real and synthetic images loss_weights_D", "class CycleGAN(): def __init__(self, opt, image_shape=(256 * 1, 256 *", "self.B_test = [] self.A_train = [] self.B_train = [] if", "width: ', self.im_w) print('image height: ', self.im_h) print('learning date G:", "= Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D, self.beta_1,", "real_A = Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B =", "models ======================= # Do note update discriminator weights during generator", "= unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if", "images self.use_data_generator = True self.generator_architecture = opt.generator_architecture self.use_norm = opt.use_norm", "loss_weights=loss_weights_D) # Use Networks to avoid falsy keras error about", "3) else: self.image_shapeB = (opt.im_w * 1, opt.im_h * 1,", "deconvolution layers (uk) - can reduce checkerboard artifacts but the", "Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A = D_A(image_A) guess_B = D_B(image_B)", "image_B = Input(self.image_shapeB) guess_A = D_A(image_A) guess_B = D_B(image_B) self.D_A", "can reduce checkerboard artifacts but the blurring might affect the", "self.data_root = opt.data_root self.img_shape = image_shape self.channels = self.img_shape[-1] #", "# self.crop_res = opt.crop_res # Resize convolution - instead of", "Do note update discriminator weights during generator training self.D_A_static.trainable =", "self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds real_A = Input(shape=self.image_shapeA, name='real_A') real_B", "[] self.A_train = [] self.B_train = [] if not self.use_data_generator:", "else: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G = Adam(self.learning_rate_G, self.beta_1,", "= opt.add_extra_conv self.image_shapeA = (opt.im_w * 1, opt.im_h * 1,", "self.use_norm = opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA = (opt.im_w *", "self.channels = self.img_shape[-1] # Fetch data during training instead of", "discriminator learning rate should be decreased self.use_patchgan = opt.use_patchgan 
self.normalization", "Cyclic loss weight B_2_A self.lambda_D = opt.lambda_D # Weight for", "available images nr_A_test_imgs = 1000 nr_B_test_imgs = 1000 if self.use_data_generator:", "---') else: print('--- Caching data ---') sys.stdout.flush() if load_training_data: if", "[] self.B_train = [] if not self.use_data_generator: print('Data has been", "* 1, opt.im_h * 1, 3) self.image_shapeB_in = (None, None,", "input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture == 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA,", "and synthetic images # Discriminator builds image_A = Input(self.image_shapeA) image_B", "D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B,", "print('learning date G: ', self.learning_rate_G) print('learning date D: ', self.learning_rate_D)", "load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only store test images", "dataloader during training ---') else: print('--- Caching data ---') sys.stdout.flush()", "= unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm,", "the terms of the BSD 0-Clause License. #This program is", "opt.lambda_D # Weight for loss from discriminator guess on synthetic", "pre caching all images self.use_data_generator = True self.generator_architecture = opt.generator_architecture", "is not None: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G", "opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss = opt.supervised_loss # optimizer if", "* 1, 3), load_training_data=True, normalization=InstanceNormalization, ): self.task = opt.task self.im_w", "send images from B to G_A2B (and the opposite) to", "layers (uk) - can reduce checkerboard artifacts but the blurring", "Learning rates self.learning_rate_D = opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1 =", "= (opt.im_w * 1, opt.im_h * 1, 1) self.image_shapeB_in =", "self.use_data_generator: print('Data has been loaded') def load_model_and_weights(self, model, weights_path, iteration,", "import Network import sys import tensorflow as tf from utilities.data_loader", "WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS", "name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # ======= Data ========== # Use", "rate should be decreased self.use_patchgan = opt.use_patchgan self.normalization = normalization", "', self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ',", "model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE': compile_losses.append('MAE')", "of the BSD 0-Clause License. #This program is distributed in", "= opt.lr_G self.beta_1 = opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size =", "architecture: ', self.generator_architecture) print('image width: ', self.im_w) print('image height: ',", "', self.im_w) print('image height: ', self.im_h) print('learning date G: ',", "0-Clause License. 
#This program is distributed in the hope that", "opt.add_extra_conv self.image_shapeA = (opt.im_w * 1, opt.im_h * 1, 3)", "be useful, but WITHOUT ANY WARRANTY; without even the implied", "use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5]", "note update discriminator weights during generator training self.D_A_static.trainable = False", "output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB,", "real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # ======= Data ==========", "opt.identity_mapping_modulus # Identity mapping will be done each time the", "opt.use_patchgan self.normalization = normalization # Loss hyperparameters self.lambda_1 = opt.lambda_1", "(opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeB_in = (None,", "# Cyclic loss weight A_2_B self.lambda_2 = opt.lambda_2 # Cyclic", "by_name): name = model.name + '_weights_epoch_' + str(iteration) final_path =", "sys import tensorflow as tf from utilities.data_loader import * class", "D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D = [0.5] # 0.5", "implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.", "real and synthetic images loss_weights_D = [0.5] # 0.5 since", "on real and synthetic images elif self.generator_architecture == 'unet_mini': D_A", "Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B) dA_guess_synthetic =", "self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A)", "model_outputs = [reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse]", "= 1000 nr_B_test_imgs = 1000 if self.use_data_generator: print('--- Using dataloader", "self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2: ', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning)", "builds image_A = Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A = D_A(image_A)", "= Input(self.image_shapeB) guess_A = D_A(image_A) guess_B = D_B(image_B) self.D_A =", "self.generator_architecture == 'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B =", "loss def cycle_loss(self, y_true, y_pred): loss = tf.reduce_mean(tf.abs(y_pred - y_true))", "= opt.im_h self.data_root = opt.data_root self.img_shape = image_shape self.channels =", "on real and synthetic images loss_weights_D = [0.5] # 0.5", "= [self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D,", "opt.crop_res # Resize convolution - instead of transpose convolution in", "Generator models ======================= # Do note update discriminator weights during", "Only store test images if opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test,", "1) self.image_shapeB_in = (None, None, 3) else: self.image_shapeB = (opt.im_w", "A_2_B self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A self.lambda_D", "discriminator guess on synthetic 
images # Learning rates self.learning_rate_D =", "opt.epsilon_norm # self.crop_res = opt.crop_res # Resize convolution - instead", "Generator builds real_A = Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B')", "use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv,", "loss - sometimes send images from B to G_A2B (and", "self.crop_res = opt.crop_res # Resize convolution - instead of transpose", "use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5]", "self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) # Use Networks to", "self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity mapping will be done each", "FOR A PARTICULAR PURPOSE. # See the BSD 0-Clause License", "= tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def cycle_loss(self, y_true, y_pred): loss", "self.batch_size = 1 self.clipvalue = opt.clipvalue self.epsilon_norm = opt.epsilon_norm #", "opt, image_shape=(256 * 1, 256 * 1, 3), load_training_data=True, normalization=InstanceNormalization,", "Network import sys import tensorflow as tf from utilities.data_loader import", "* from keras.engine.topology import Network import sys import tensorflow as", "transpose convolution in deconvolution layers (uk) - can reduce checkerboard", "= Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') #", "(None, None, 3) if self.task == 'Long2Short_raw': self.image_shapeB = (opt.im_w", "self.task == 'Long2Short_raw': self.image_shapeB = (opt.im_w * 1, opt.im_h *", "[] if not self.use_data_generator: print('Data has been loaded') def load_model_and_weights(self,", "self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight)", "to G_A2B (and the opposite) to teach identity mappings self.use_identity_learning", "= os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def print_info(self): print('fInitializing Cycle", "Ltd. All rights reserved. 
#This program is free software; you", "has been loaded') def load_model_and_weights(self, model, weights_path, iteration, by_name): name", "we train on real and synthetic images # Discriminator builds", "divisable with this number # PatchGAN - if false the", "self.beta_2, clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2) self.opt_G =", "self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D)", "Use Networks to avoid falsy keras error about weight descripancies", "Using dataloader during training ---') else: print('--- Caching data ---')", "GAN with parameters ...') print('task: ', self.task) print('generator architecture: ',", "= Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A = D_A(image_A) guess_B =", "models.ICCV_architectures import * from models.unet import * from keras.engine.topology import", "name = model.name + '_weights_epoch_' + str(iteration) final_path = os.path.join(root,", "self.im_h = opt.im_h self.data_root = opt.data_root self.img_shape = image_shape self.channels", "print('image width: ', self.im_w) print('image height: ', self.im_h) print('learning date", "self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1: ', self.beta_1)", "output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture == 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB,", "[] self.B_test = [] self.A_train = [] self.B_train = []", "'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B], outputs=model_outputs,", "y_true, y_pred): loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def cycle_loss(self,", "number # PatchGAN - if false the discriminator learning rate", "compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights = [self.lambda_1, self.lambda_2,", "'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A =", "1, 3) self.image_shapeB_in = (None, None, 3) # Identity loss", "CycleGAN(): def __init__(self, opt, image_shape=(256 * 1, 256 * 1,", "def __init__(self, opt, image_shape=(256 * 1, 256 * 1, 3),", "if self.task == 'Long2Short_raw': self.image_shapeB = (opt.im_w * 1, opt.im_h", "All rights reserved. #This program is free software; you can", "print_info(self): print('fInitializing Cycle GAN with parameters ...') print('task: ', self.task)", "or FITNESS FOR A PARTICULAR PURPOSE. # See the BSD", "we train on real and synthetic images elif self.generator_architecture ==", "update discriminator weights during generator training self.D_A_static.trainable = False self.D_B_static.trainable", "loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds real_A = Input(shape=self.image_shapeA, name='real_A')", "for more details. 
from keras.optimizers import Adam from models.ICCV_architectures import", "crop_size=self.im_w, generator=True) # Only store test images if opt.task ==", "to fetch all available images nr_A_test_imgs = 1000 nr_B_test_imgs =", "self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy',", "nr_B_test_imgs = 1000 if self.use_data_generator: print('--- Using dataloader during training", "0.5 since we train on real and synthetic images elif", "opt.im_w self.im_h = opt.im_h self.data_root = opt.data_root self.img_shape = image_shape", "opt.im_h * 1, 1) self.image_shapeB_in = (None, None, 3) else:", "model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight)", "= unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5] # 0.5", "= modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution,", "even the implied warranty of MERCHANTABILITY or FITNESS FOR A", "keras error about weight descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model')", "======= Data ========== # Use 'None' to fetch all available", "self.generator_architecture == 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B", "print('image height: ', self.im_h) print('learning date G: ', self.learning_rate_G) print('learning", "normalization # Loss hyperparameters self.lambda_1 = opt.lambda_1 # Cyclic loss", "True self.generator_architecture = opt.generator_architecture self.use_norm = opt.use_norm self.add_extra_conv = opt.add_extra_conv", "dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs", "self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model')", "# Loss hyperparameters self.lambda_1 = opt.lambda_1 # Cyclic loss weight", "self.learning_rate_D = opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1 = opt.beta_1 self.beta_2", "modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan, disc_use_4_layers=True) loss_weights_D =", "[self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B)", "def load_model_and_weights(self, model, weights_path, iteration, by_name): name = model.name +", "', self.generator_architecture) print('image width: ', self.im_w) print('image height: ', self.im_h)", "name='real_B') synthetic_B = self.G_A2B(real_A) 
synthetic_A = self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A)", "weight descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B,", "you can redistribute it and/or modify it under the terms", "self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D,", "', self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ',", "loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss='binary_crossentropy',", "self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 1) self.image_shapeB_in", "= self.G_A2B(real_A) synthetic_A = self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic =", "self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only", "the blurring might affect the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution #", "opt.generator_architecture self.use_norm = opt.use_norm self.add_extra_conv = opt.add_extra_conv self.image_shapeA = (opt.im_w", "1, opt.im_h * 1, 3) self.image_shapeA_in = (None, None, 3)", "each time the iteration number is divisable with this number", "the opposite) to teach identity mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus", "input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model')", "images elif self.generator_architecture == 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm,", "epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D =", "model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning: model_outputs.append(synthetic_A) model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE':", "opt.task self.im_w = opt.im_w self.im_h = opt.im_h self.data_root = opt.data_root", "from models.ICCV_architectures import * from models.unet import * from keras.engine.topology", "= [] if not self.use_data_generator: print('Data has been loaded') def", "in the hope that it will be useful, but WITHOUT", "guess on synthetic images # Learning rates self.learning_rate_D = opt.lr_D", "========== if self.generator_architecture == 'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True)", "opt.im_h * 1, 3) self.image_shapeA_in = (None, None, 3) if", "= opt.clipvalue self.epsilon_norm = opt.epsilon_norm # self.crop_res = opt.crop_res #", "self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A,", "', self.identity_mapping_modulus) print('lambda_1: 
', self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D: ',", "None, 3) else: self.image_shapeB = (opt.im_w * 1, opt.im_h *", "self.use_data_generator: print('--- Using dataloader during training ---') else: print('--- Caching", "clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D =", "print('generator architecture: ', self.generator_architecture) print('image width: ', self.im_w) print('image height:", "opt.clipvalue is not None: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue)", "============= Generator models ======================= # Do note update discriminator weights", "self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1,", "* 1, opt.im_h * 1, 1) self.image_shapeB_in = (None, None,", "normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A = unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA,", "useful, but WITHOUT ANY WARRANTY; without even the implied warranty", "= opt.epsilon_norm # self.crop_res = opt.crop_res # Resize convolution -", "name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A) synthetic_A =", "self.use_resize_convolution = opt.use_resize_convolution # Supervised learning part self.use_supervised_learning = opt.use_supervised_learning", "name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse,", "B_2_A self.lambda_D = opt.lambda_D # Weight for loss from discriminator", "None: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G,", "D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D = [0.5] #", "compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic) model_outputs.append(dB_guess_synthetic) if self.use_supervised_learning:", "distributed in the hope that it will be useful, but", "unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A =", "mapping will be done each time the iteration number is", "be done each time the iteration number is divisable with", "name='G_B2A_model') elif self.generator_architecture == 'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization,", "self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1) print('lambda_2: ', self.lambda_2) print('lambda_D: ', self.lambda_D)", "but WITHOUT ANY WARRANTY; without even the implied warranty of", "loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else: self.D_A.compile(optimizer=self.opt_D, loss='binary_crossentropy', loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D,", "date G: ', self.learning_rate_G) print('learning date D: ', self.learning_rate_D) 
print('use", "falsy keras error about weight descripancies self.D_A_static = Network(inputs=image_A, outputs=guess_A,", "= [reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights", "model_outputs.append(synthetic_B) if self.supervised_loss == 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model", "= (opt.im_w * 1, opt.im_h * 1, 3) self.image_shapeB_in =", "self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus) print('lambda_1: ', self.lambda_1)", "synthetic images elif self.generator_architecture == 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm,", "= unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_A2B_model') self.G_B2A", "3) # Identity loss - sometimes send images from B", "unet_generator_mini(input=self.image_shapeB, output=self.image_shapeA, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning:", "PARTICULAR PURPOSE. # See the BSD 0-Clause License for more", "# Do note update discriminator weights during generator training self.D_A_static.trainable", "print('fInitializing Cycle GAN with parameters ...') print('task: ', self.task) print('generator", "# Learning rates self.learning_rate_D = opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1", "false the discriminator learning rate should be decreased self.use_patchgan =", "# ======= Data ========== # Use 'None' to fetch all", "checkerboard artifacts but the blurring might affect the cycle-consistency self.use_resize_convolution", "# PatchGAN - if false the discriminator learning rate should", "', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def lse(self,", "instead of transpose convolution in deconvolution layers (uk) - can", "'Long2Short_raw': self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 1)", "See the BSD 0-Clause License for more details. 
from keras.optimizers", "during training ---') else: print('--- Caching data ---') sys.stdout.flush() if", "+ '_weights_epoch_' + str(iteration) final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path,", "Discriminator builds image_A = Input(self.image_shapeA) image_B = Input(self.image_shapeB) guess_A =", "loaded') def load_model_and_weights(self, model, weights_path, iteration, by_name): name = model.name", "convolution in deconvolution layers (uk) - can reduce checkerboard artifacts", "self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture ==", "but the blurring might affect the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution", "1 self.clipvalue = opt.clipvalue self.epsilon_norm = opt.epsilon_norm # self.crop_res =", "opt.supervised_loss # optimizer if opt.clipvalue is not None: self.opt_D =", "self.cycle_loss, self.lse, self.lse] compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D] model_outputs.append(dA_guess_synthetic)", "Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A) synthetic_A", "[reconstructed_A, reconstructed_B] compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse] compile_weights =", "D: ', self.learning_rate_D) print('use patchGAN: ', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning)", "None, 3) # Identity loss - sometimes send images from", "outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # ======= Data ========== #", "D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm,", "+ str(iteration) final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name)) model.load_weights(final_path, by_name=by_name) def", "(C) 2020. Huawei Technologies Co., Ltd. All rights reserved. #This", "self.use_patchgan = opt.use_patchgan self.normalization = normalization # Loss hyperparameters self.lambda_1", "name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) else:", "self.generator_architecture == 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA, output=self.image_shapeB, name='G_A2B_model')", "outputs=guess_B, name='D_B_static_model') # ============= Generator models ======================= # Do note", "nr_B_test_imgs) else: self.A_test = [] self.B_test = [] self.A_train =", "Caching data ---') sys.stdout.flush() if load_training_data: if self.use_data_generator: self.data_generator =", "opt.supervised_weight self.supervised_loss = opt.supervised_loss # optimizer if opt.clipvalue is not", "', self.im_h) print('learning date G: ', self.learning_rate_G) print('learning date D:", "rights reserved. 
#This program is free software; you can redistribute", "= load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only store test", "opt.lr_D self.learning_rate_G = opt.lr_G self.beta_1 = opt.beta_1 self.beta_2 = opt.beta_2", "mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity mapping", "done each time the iteration number is divisable with this", "of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See", "* 1, 1) self.image_shapeB_in = (None, None, 3) else: self.image_shapeB", "blurring might affect the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution # Supervised", "== 'MAE': compile_losses.append('MAE') compile_losses.append('MAE') compile_weights.append(self.supervised_weight) compile_weights.append(self.supervised_weight) self.G_model = Model(inputs=[real_A, real_B],", "', self.task) print('generator architecture: ', self.generator_architecture) print('image width: ', self.im_w)", "loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def cycle_loss(self, y_true, y_pred):", "elif self.generator_architecture == 'unet_mini': D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan)", "self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only store", "1, 256 * 1, 3), load_training_data=True, normalization=InstanceNormalization, ): self.task =", "# Resize convolution - instead of transpose convolution in deconvolution", "# Generators if self.generator_architecture == 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution,", "= modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture == 'unet_mini':", "= (None, None, 3) if self.task == 'Long2Short_raw': self.image_shapeB =", "batch_size=self.batch_size, crop_size=self.im_w, generator=True) # Only store test images if opt.task", "D_A(image_A) guess_B = D_B(image_B) self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B", "B to G_A2B (and the opposite) to teach identity mappings", "test images if opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names", "= Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB, name='real_B') synthetic_B = self.G_A2B(real_A)", "License for more details. 
from keras.optimizers import Adam from models.ICCV_architectures", "outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # ============= Generator", "self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model') self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model')", "self.supervised_weight = opt.supervised_weight self.supervised_loss = opt.supervised_loss # optimizer if opt.clipvalue", "print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ', self.supervised_loss) def lse(self, y_true, y_pred):", "get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test = [] self.B_test = [] self.A_train", "G_A2B (and the opposite) to teach identity mappings self.use_identity_learning =", "', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus: ',", "identity mappings self.use_identity_learning = opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity", "image_shape=(256 * 1, 256 * 1, 3), load_training_data=True, normalization=InstanceNormalization, ):", "# Identity mapping will be done each time the iteration", "= Model(inputs=[real_A, real_B], outputs=model_outputs, name='G_model') self.G_model.compile(optimizer=self.opt_G, loss=compile_losses, loss_weights=compile_weights) # =======", "loss weight B_2_A self.lambda_D = opt.lambda_D # Weight for loss", "BSD 0-Clause License for more details. from keras.optimizers import Adam", "from discriminator guess on synthetic images # Learning rates self.learning_rate_D", "0-Clause License for more details. from keras.optimizers import Adam from", "normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution, name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE')", "print('lambda_2: ', self.lambda_2) print('lambda_D: ', self.lambda_D) print('beta_1: ', self.beta_1) print('beta_2:", "unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan)", "the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR", "opt.lr_G self.beta_1 = opt.beta_1 self.beta_2 = opt.beta_2 self.batch_size = 1", "self.beta_2, clipvalue=self.clipvalue) self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D", "Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # ============= Generator models ======================= # Do", "not None: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue) self.opt_G =", "self.learning_rate_D) print('use patchGAN: ', self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization: ',", "weight B_2_A self.lambda_D = opt.lambda_D # Weight for loss from", "affect the cycle-consistency self.use_resize_convolution = opt.use_resize_convolution # Supervised learning part", "= opt.use_identity_learning self.identity_mapping_modulus = opt.identity_mapping_modulus # Identity mapping will be", "False self.D_B_static.trainable = False # Generators if self.generator_architecture == 
'ICCV':", "name='G_B2A_model') if self.use_identity_learning: self.G_A2B.compile(optimizer=self.opt_G, loss='MAE') self.G_B2A.compile(optimizer=self.opt_G, loss='MAE') # Generator builds", "Resize convolution - instead of transpose convolution in deconvolution layers", "generator=True) # Only store test images if opt.task == 'Vimeo2Long_SID':", "store test images if opt.task == 'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names,", "if load_training_data: if self.use_data_generator: self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size, crop_size=self.im_w,", "self.beta_2) print('use_supervised_learning: ', self.use_supervised_learning) print('supervised_weight: ', self.supervised_weight) print('supervised_loss: ', self.supervised_loss)", "(None, None, 3) # Identity loss - sometimes send images", "if false the discriminator learning rate should be decreased self.use_patchgan", "self.G_B2A(synthetic_B) reconstructed_B = self.G_A2B(synthetic_A) model_outputs = [reconstructed_A, reconstructed_B] compile_losses =", "#This program is free software; you can redistribute it and/or", "Co., Ltd. All rights reserved. #This program is free software;", "self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test =", "self.img_shape = image_shape self.channels = self.img_shape[-1] # Fetch data during", "loss weight A_2_B self.lambda_2 = opt.lambda_2 # Cyclic loss weight", "False # Generators if self.generator_architecture == 'ICCV': self.G_A2B = modelGenerator(conv_kernel_c7Ak=7,", "test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test = [] self.B_test", "image_shape self.channels = self.img_shape[-1] # Fetch data during training instead", "Cyclic loss weight A_2_B self.lambda_2 = opt.lambda_2 # Cyclic loss", "self.A_test = [] self.B_test = [] self.A_train = [] self.B_train", "self.im_w = opt.im_w self.im_h = opt.im_h self.data_root = opt.data_root self.img_shape", "= Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model') if", "outputs=guess_B, name='D_B_model') if self.use_patchgan: self.D_A.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D) self.D_B.compile(optimizer=self.opt_D, loss=self.lse, loss_weights=loss_weights_D)", "use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm, use_patchgan=self.use_patchgan) loss_weights_D", "= Network(inputs=image_B, outputs=guess_B, name='D_B_static_model') # ============= Generator models ======================= #", "# ======= Discriminator model ========== if self.generator_architecture == 'ICCV': D_A", "name='G_A2B_model') self.G_B2A = modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture", "it under the terms of the BSD 0-Clause License. 
#This", "modelGenerator(conv_kernel_c7Ak=7, use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB, output=self.image_shapeA, name='G_B2A_model') elif self.generator_architecture == 'unet_mini': self.G_A2B", "self.use_patchgan) print('use_identity_learning: ', self.use_identity_learning) print('normalization: ', self.normalization) print('identity_mapping_modulus: ', self.identity_mapping_modulus)", "# Generator builds real_A = Input(shape=self.image_shapeA, name='real_A') real_B = Input(shape=self.image_shapeB,", "decreased self.use_patchgan = opt.use_patchgan self.normalization = normalization # Loss hyperparameters", "Networks to avoid falsy keras error about weight descripancies self.D_A_static", "weight A_2_B self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A", "Model(inputs=image_A, outputs=guess_A, name='D_A_model') self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model') if self.use_patchgan:", "= image_shape self.channels = self.img_shape[-1] # Fetch data during training", "load_training_data=True, normalization=InstanceNormalization, ): self.task = opt.task self.im_w = opt.im_w self.im_h", "loss='MAE') # Generator builds real_A = Input(shape=self.image_shapeA, name='real_A') real_B =", "= opt.task self.im_w = opt.im_w self.im_h = opt.im_h self.data_root =", "tf.reduce_mean(tf.squared_difference(y_pred, y_true)) return loss def cycle_loss(self, y_true, y_pred): loss =", "if self.generator_architecture == 'ICCV': D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan, disc_use_4_layers=True) D_B", "= opt.use_patchgan self.normalization = normalization # Loss hyperparameters self.lambda_1 =", "= opt.lambda_2 # Cyclic loss weight B_2_A self.lambda_D = opt.lambda_D", "opt.im_h * 1, 3) self.image_shapeB_in = (None, None, 3) #", "import tensorflow as tf from utilities.data_loader import * class CycleGAN():", "date D: ', self.learning_rate_D) print('use patchGAN: ', self.use_patchgan) print('use_identity_learning: ',", "self.G_B2A(real_B) dA_guess_synthetic = self.D_A_static(synthetic_A) dB_guess_synthetic = self.D_B_static(synthetic_B) reconstructed_A = self.G_B2A(synthetic_B)", "else: print('--- Caching data ---') sys.stdout.flush() if load_training_data: if self.use_data_generator:", "self.beta_2) # # ======= Discriminator model ========== if self.generator_architecture ==", "from utilities.data_loader import * class CycleGAN(): def __init__(self, opt, image_shape=(256", "= opt.identity_mapping_modulus # Identity mapping will be done each time", "'unet_mini': self.G_A2B = unet_generator_mini(input=self.image_shapeA, output=self.image_shapeB, normalization=normalization, epsilon=self.epsilon_norm, use_norm=self.use_norm, add_extra_conv=self.add_extra_conv, use_resize_convolution=self.use_resize_convolution,", "should be decreased self.use_patchgan = opt.use_patchgan self.normalization = normalization #", "return loss def cycle_loss(self, y_true, y_pred): loss = tf.reduce_mean(tf.abs(y_pred -", "# Weight for loss from discriminator guess on synthetic images", "since we train on real and synthetic images elif self.generator_architecture", "'Vimeo2Long_SID': self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs, nr_B_test_imgs) else: self.A_test", "is distributed in the hope that it will be useful,", "Weight for loss from discriminator guess on synthetic images #", "self.beta_1, self.beta_2, clipvalue=self.clipvalue) else: self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2) 
self.opt_G", "without even the implied warranty of MERCHANTABILITY or FITNESS FOR", "self.task = opt.task self.im_w = opt.im_w self.im_h = opt.im_h self.data_root", "# Supervised learning part self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight = opt.supervised_weight", "Data ========== # Use 'None' to fetch all available images", "def print_info(self): print('fInitializing Cycle GAN with parameters ...') print('task: ',", "* 1, 256 * 1, 3), load_training_data=True, normalization=InstanceNormalization, ): self.task", "self.generator_architecture) print('image width: ', self.im_w) print('image height: ', self.im_h) print('learning", "disc_use_4_layers=True) loss_weights_D = [0.5] # 0.5 since we train on", "self.use_supervised_learning = opt.use_supervised_learning self.supervised_weight = opt.supervised_weight self.supervised_loss = opt.supervised_loss #", "opt.clipvalue self.epsilon_norm = opt.epsilon_norm # self.crop_res = opt.crop_res # Resize", "iteration, by_name): name = model.name + '_weights_epoch_' + str(iteration) final_path" ]
[ "api from fastapi.templating import Jinja2Templates from starlette.requests import Request #", "from fastapi import FastAPI from fastapi.staticfiles import StaticFiles from routes", "Case\", description=f\"endpoint para subir planilhas para banco de dados relacional", "fastapi.staticfiles import StaticFiles from routes import doc, api from fastapi.templating", "# configure static and templates file on jinja 2 app", "builders and initiate doc.init_app(app) api.init_app(app, \"/api\") # templates = Jinja2Templates(directory=\"templates\")", "#views @app.get(\"/\", tags=[\"/view\"]) async def index(request: Request): return templates.TemplateResponse(\"index.html\", {\"request\":", "app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import factory builders and initiate doc.init_app(app) api.init_app(app,", "static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import factory builders and initiate", "starlette.requests import Request # configure static and templates file on", "def index(request: Request): return templates.TemplateResponse(\"index.html\", {\"request\": request}) if __name__ ==", "import Jinja2Templates from starlette.requests import Request # configure static and", "2 app = FastAPI( title=f\"Technical Case\", description=f\"endpoint para subir planilhas", "FastAPI( title=f\"Technical Case\", description=f\"endpoint para subir planilhas para banco de", "de dados relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")", "version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import factory builders and", "import Request # configure static and templates file on jinja", "#import factory builders and initiate doc.init_app(app) api.init_app(app, \"/api\") # templates", "from routes import doc, api from fastapi.templating import Jinja2Templates from", "name=\"static\") #import factory builders and initiate doc.init_app(app) api.init_app(app, \"/api\") #", ") app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import factory builders and initiate doc.init_app(app)", "import FastAPI from fastapi.staticfiles import StaticFiles from routes import doc,", "static and templates file on jinja 2 app = FastAPI(", "subir planilhas para banco de dados relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\"", "Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"]) async def index(request: Request): return templates.TemplateResponse(\"index.html\",", "dados relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import", "templates file on jinja 2 app = FastAPI( title=f\"Technical Case\",", "from fastapi.templating import Jinja2Templates from starlette.requests import Request # configure", "doc.init_app(app) api.init_app(app, \"/api\") # templates = Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"])", "tags=[\"/view\"]) async def index(request: Request): return templates.TemplateResponse(\"index.html\", {\"request\": request}) if", "and templates file on jinja 2 app = FastAPI( title=f\"Technical", "banco de dados relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"),", "from fastapi.staticfiles 
import StaticFiles from routes import doc, api from", "import StaticFiles from routes import doc, api from fastapi.templating import", "configure static and templates file on jinja 2 app =", "from starlette.requests import Request # configure static and templates file", "on jinja 2 app = FastAPI( title=f\"Technical Case\", description=f\"endpoint para", "jinja 2 app = FastAPI( title=f\"Technical Case\", description=f\"endpoint para subir", "description=f\"endpoint para subir planilhas para banco de dados relacional Postgres.\",", "templates = Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"]) async def index(request: Request):", "= Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"]) async def index(request: Request): return", "async def index(request: Request): return templates.TemplateResponse(\"index.html\", {\"request\": request}) if __name__", "planilhas para banco de dados relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\" )", "Request # configure static and templates file on jinja 2", "StaticFiles(directory=\"static\"), name=\"static\") #import factory builders and initiate doc.init_app(app) api.init_app(app, \"/api\")", "fastapi.templating import Jinja2Templates from starlette.requests import Request # configure static", "doc, api from fastapi.templating import Jinja2Templates from starlette.requests import Request", "\"/api\") # templates = Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"]) async def", "title=f\"Technical Case\", description=f\"endpoint para subir planilhas para banco de dados", "para subir planilhas para banco de dados relacional Postgres.\", version=f\"0.0.1\",", "factory builders and initiate doc.init_app(app) api.init_app(app, \"/api\") # templates =", "routes import doc, api from fastapi.templating import Jinja2Templates from starlette.requests", "file on jinja 2 app = FastAPI( title=f\"Technical Case\", description=f\"endpoint", "api.init_app(app, \"/api\") # templates = Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"]) async", "StaticFiles from routes import doc, api from fastapi.templating import Jinja2Templates", "@app.get(\"/\", tags=[\"/view\"]) async def index(request: Request): return templates.TemplateResponse(\"index.html\", {\"request\": request})", "Request): return templates.TemplateResponse(\"index.html\", {\"request\": request}) if __name__ == \"__main__\": uvicorn.run(\"main:app\",", "and initiate doc.init_app(app) api.init_app(app, \"/api\") # templates = Jinja2Templates(directory=\"templates\") #views", "Jinja2Templates from starlette.requests import Request # configure static and templates", "initiate doc.init_app(app) api.init_app(app, \"/api\") # templates = Jinja2Templates(directory=\"templates\") #views @app.get(\"/\",", "# templates = Jinja2Templates(directory=\"templates\") #views @app.get(\"/\", tags=[\"/view\"]) async def index(request:", "= FastAPI( title=f\"Technical Case\", description=f\"endpoint para subir planilhas para banco", "import uvicorn from fastapi import FastAPI from fastapi.staticfiles import StaticFiles", "FastAPI from fastapi.staticfiles import StaticFiles from routes import doc, api", "para banco de dados relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\",", "Postgres.\", version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import factory builders", 
"index(request: Request): return templates.TemplateResponse(\"index.html\", {\"request\": request}) if __name__ == \"__main__\":", "templates.TemplateResponse(\"index.html\", {\"request\": request}) if __name__ == \"__main__\": uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8080)", "import doc, api from fastapi.templating import Jinja2Templates from starlette.requests import", "return templates.TemplateResponse(\"index.html\", {\"request\": request}) if __name__ == \"__main__\": uvicorn.run(\"main:app\", host=\"0.0.0.0\",", "relacional Postgres.\", version=f\"0.0.1\", static_directory=\"static\" ) app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\") #import factory", "app = FastAPI( title=f\"Technical Case\", description=f\"endpoint para subir planilhas para", "uvicorn from fastapi import FastAPI from fastapi.staticfiles import StaticFiles from", "fastapi import FastAPI from fastapi.staticfiles import StaticFiles from routes import" ]
[ "def _run_cleaning(file_ids, client, need_table_columns, headers, delimiter, hidden, polling_interval=None): cleaning_futures =", "The schema and table you want to upload to. E.g.,", "need_table_columns: file_columns = detected_info['tableColumns'] _check_column_types(table_columns, file_columns, output_file.object_id) _check_all_detected_info(detected_info, headers, delimiter,", "hidden=True, **kwargs): \"\"\"Read data from Civis using a custom SQL", "Returns ------- fut : :class:`~civis.futures.CivisFuture` A future which returns the", "'\\n') _decompress_stream(response, buf, write_bytes=False) buf.seek(0) data = list(csv.reader(buf, **kwargs)) return", "import before failing. existing_table_rows : str, optional The behaviour if", "client.imports.post_files_csv( source, destination, headers, name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter, compression=compression, escaped=escaped,", "and NO_PANDAS: raise ImportError(\"use_pandas is True but pandas is not", "archive=archive, hidden=hidden, **kwargs) return data def export_to_civis_file(sql, database, job_name=None, client=None,", "as the first argument to this function. Parameters ---------- df", "'wb') as fout: fout.write(headers) with open(local_path, 'ab') as fout: shutil.copyfileobj(response.raw,", "delimiter to use, if any. One of ``','``, ``'\\t'``, or", "= read_civis_sql(sql, \"my_database\", use_pandas=True) >>> col_a = df[\"column_a\"] >>> data", "Civis file IDs. Reference by name to this argument is", "+ '.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED) def _download_callback(job_id, run_id, filename, headers,", "reduce the work that Platform does to provide a single", "to retrieve headers and once to retrieve the data. This", "df = read_civis_sql(sql, \"my_database\", use_pandas=True) >>> col_a = df[\"column_a\"] >>>", ".format(script_id)) buf = io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt = io.TextIOWrapper(buf,", "file. \"\"\" if client is None: client = APIClient(api_key=api_key) if", "update; column statistics may not be available for up to", "civis import APIClient from civis._utils import maybe_get_random_name from civis.base import", "headers = None try: # use 'begin read only;' to", "None: client = APIClient(api_key=api_key) if archive: warnings.warn(\"`archive` is deprecated and", "keys: 'query': str The query. 'header': list of str The", "------ ImportError If `use_pandas` is ``True`` and `pandas` is not", "the response from :func:`civis.APIClient.scripts.get_sql_runs` after the sql query has completed", "DELIMITERS.get(delimiter) assert delimiter, \"delimiter must be one of {}\".format(DELIMITERS.keys()) csv_settings", "will always be filled. Raises ------ ValueError If the input", "csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id", "the `header` parameter (which will be passed directly to :func:`~pandas.DataFrame.to_csv`)", "optional, default \"immediate\" One of \"delayed\" or \"immediate\". 
If \"immediate\",", "= civis_to_csv(\"file.csv\", sql, \"my_database\") >>> fut.result() # Wait for job", "if client is None: client = APIClient(api_key=api_key) db_id = client.get_database_id(database)", "\"\"\"Upload the contents of a local CSV file to Civis.", "and include_header: compression = 'none' # don't support parallel unload;", "Note that this is true regardless of whether the destination", "``'\\\\t'`` or ``'|'``. headers : bool, optional Whether or not", "release, a ``'gzip'`` compressed file will be returned for all", "headers. primary_keys: list[str], optional A list of the primary key", "None try: # use 'begin read only;' to ensure we", "read only;' to ensure we can't change state sql =", "credential. If ``None``, the default credential will be used. include_header:", "has quotes escaped with a backslash. Defaults to false. execution:", "---------- column_list: list[dict] the list of columns from file cleaning.", "from civis._deprecation import deprecate_param import requests try: from io import", "the same destination table. delimiter : string, optional The column", "and once to retrieve the data. This is done to", "'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path, 'w') as", "else False csv_settings = dict(include_header=include_header, compression='gzip') script_id, run_id = _sql_script(client,", ")[0] detected_info = client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if need_table_columns else", ":func:`python:csv.reader` if `use_pandas` is ``False``. Returns ------- data : :class:`pandas:pandas.DataFrame`", "of ``','``, ``'\\t'``, or ``'|'``. Default: ``'|'``. max_file_size: int, optional", "inconsistencies are detected, raise an error. first_completed = done.pop() output_file", "file has quotes escaped with a backslash. Defaults to false.", "headers - ' 'please ensure all imported files either '", "you want to upload to. E.g., ``'scratch.table'``. api_key : DEPRECATED", "is requested, a gzip file is not actually returned #", "file_to_civis(data, name, client=client) log.debug('Uploaded file %s to Civis file %s',", "detected_info['compression']: raise CivisImportError('Mismatch between detected and provided ' 'compressions -", "!= detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter \"{}\" does not match '", "will be passed to :meth:`pandas:pandas.DataFrame.to_csv`. Returns ------- fut : :class:`~civis.futures.CivisFuture`", "table: str Either a Redshift schema and table name combined", "except ImportError: from cStringIO import StringIO try: import pandas as", "data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id, polling_interval=polling_interval, archive=archive,", "if there are more columns in the table. delimiter :", "or int Read data from this database. Can be the", "to false. execution: string, optional, default \"immediate\" One of \"delayed\"", "- other files will be # compared to this one.", "poll_on_creation=False) if archive: def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) fut.result()", "of results from :func:`python:csv.reader`. job_name : str, optional A name", "results to Civis file %s (%s)', outputs[0][\"output_name\"], file_id) if use_pandas:", "escaped with a backslash. Defaults to false. execution: string, optional,", "optional The database credential ID. 
If ``None``, the default credential", "always set compression to gzip to reduce I/O csv_settings =", "ID of the database credential. If ``None``, the default credential", "To store the index along with the other values, use", "to give the job. If omitted, a random job name", "in the same format. Parameters ---------- file_id : int or", "if use_pandas and NO_PANDAS: raise ImportError(\"use_pandas is True but pandas", "= df[\"column_a\"] See Also -------- civis.io.read_civis_sql : Read directly into", "Also -------- :func:`~pandas.DataFrame.to_csv` \"\"\" if client is None: client =", "Export data from this database. Can be the database name", "== 'none': with open(local_path, 'wb') as fout: fout.write(headers) _decompress_stream(response, fout)", "filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id, run_id = _sql_script(client, sql, database, job_name,", "results of a query to a Civis file Parameters ----------", "return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) fut.result() outputs = client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if", ":class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will be", "round of cleaning results as compared to provided values. Parameters", "need_table_columns, headers, delimiter, hidden) (cleaned_file_ids, headers, compression, delimiter, cleaned_table_columns) =", "except Exception as exc: # NOQA log.debug(\"Failed to retrieve headers", "with open(local_path, 'ab') as fout: shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) # write", "accounted for - # Since concurrent.futures.wait returns two sets, it", "SQL query's results in a Civis file \"\"\" if use_pandas", "True except ValueError: table_exists = False # Use Preprocess endpoint", "\"\"\" if archive: warnings.warn(\"`archive` is deprecated and will be removed", "needed # and perform necessary file cleaning need_table_columns = ((not", "necessary file cleaning need_table_columns = ((not table_exists or existing_table_rows ==", "share a base type (e.g. INT, VARCHAR, DECIMAl) rather than", "similar manifest file to conventional S3 UNLOAD statements except the", "= export_to_civis_file(sql, \"my_database\") >>> file_id = fut.result()['output'][0][\"file_id\"] See Also --------", "bool, optional If ``True`` (the default), this job will not", "credential_id=credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client,", "attempts to autodetect whether or not the first row contains", "double quoted, e.g. ``'my_schema.\"my.table\"'``. database : str or int Read", "distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False,", "DEPRECATED str, optional Your Civis API key. If not given,", "(e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- sql : str", "database = \"my_data\" >>> columns = [\"column_a\", \"ROW_NUMBER() OVER(ORDER BY", "download = _download_callback(script_id, run_id, filename, headers, compression) fut.add_done_callback(download) if archive:", "record. If existing_table_rows is \"upsert\", this field is required. 
Note", "else: url = outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported results to", "csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False)", "that would cause breaking change for now # when gzip", "DECIMAL(8, 10)) tcol_base_type = tcol['sql_type'].split('(', 1)[0] fcol_base_type = fcol['sql_type'].split('(', 1)[0]", "the data. The first execution of the custom SQL is", "client is None: client = APIClient(api_key=api_key) if use_pandas and NO_PANDAS:", "= pd.read_csv(buf, delimiter=delimiter) See Also -------- civis.APIClient.scripts.post_sql \"\"\" if client", "schema_name_tup = next(reader) if len(schema_name_tup) == 1: schema_name_tup = (None,", "headers != detected_info['includeHeader']: raise CivisImportError('Mismatch between detected headers - '", ": str, optional The column to use as the sortkey", "= df[\"column_a\"] >>> data = read_civis_sql(sql, \"my_database\") >>> columns =", "Type of compression used. 'delimiter': str Delimiter that separates the", "first argument to this function. Parameters ---------- df : :class:`pandas:pandas.DataFrame`", "_decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path, 'w') as fout: arcname = path.basename(local_path)", "tcol_base_type != fcol_base_type: error_msgs.append( 'Column {}: File base type was", "contents into memory. civis.io.read_civis_sql : Read results of a SQL", "civis_to_multifile_csv(sql, database, delimiter=delimiter) >>> ids = [entry['id'] for entry in", "regardless if there are more columns in the table. delimiter", "`pandas` `DataFrame`. Note that if `use_pandas` is ``False``, no parsing", "optional Which delimiter to use, if any. One of ``','``,", ":envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum number of rows", "warnings import zlib import gzip import zipfile from civis import", "returns the response from :func:`civis.APIClient.scripts.get_sql_runs` after the sql query has", "The SQL select string to be executed. database : str", "output_obj_id: int The file ID under consideration; used for error", "not installed. Examples -------- >>> table = \"schema.table\" >>> database", "= \", \".join(columns) if columns is not None else \"*\"", "raise CivisImportError('Mismatch between detected headers - ' 'please ensure all", "{}: \"{}\"'.format( delimiter, output_file_id, detected_info[\"columnDelimiter\"]) ) if compression != detected_info['compression']:", "use as the sortkey for the table. sortkey2 : str,", "fout: arcname = path.basename(local_path) if arcname.split('.')[-1] == 'zip': arcname =", "will decompress the stream # however, our use of content-encoding", "get the table columns as needed # and perform necessary", "job name will be used. client : :class:`civis.APIClient`, optional If", "\"\"\"Export data from Civis to a local CSV file. 
The", "sortkey2=None, table_columns=None, delimiter=\",\", headers=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", credential_id=None, polling_interval=None,", "def _decompress_stream(response, buf, write_bytes=True): # use response.raw for a more", "client, headers, need_table_columns, delimiter ) table_columns = table_columns or cleaned_table_columns", "client = APIClient(api_key=api_key) db_id = client.get_database_id(database) credential_id = credential_id or", ">>> data = read_civis_sql(sql, \"my_database\") >>> columns = data.pop(0) >>>", "civis.io.dataframe_to_civis(df, 'my-database', ... 'scratch.df_table') >>> fut.result() See Also -------- :func:`~pandas.DataFrame.to_csv`", "_get_sql_select(table, columns) data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id,", "ensure all imported files either ' 'have a header or", "name or ID. use_pandas : bool, optional If ``True``, return", "use_pandas=True) >>> col_a = df[\"column_a\"] >>> data = read_civis_sql(sql, \"my_database\")", "use response.raw for a more consistent approach # if content-encoding", "type was {}, but expected {}'.format( idx, fcol_base_type, tcol_base_type )", "CSV parts are accessible via both files endpoint IDs and", "own names parameter _kwargs = {'names': headers} _kwargs.update(kwargs) _kwargs['compression'] =", "\"upsert\", this field is required. Note that this is true", "second column in a compound sortkey for the table. table_columns", ": Write directly to a CSV file. civis.io.civis_file_to_table : Upload", "Civis UI. **kwargs : kwargs Extra keyword arguments will be", "raise ValueError(\"Cannot parse schema and table. \" \"Does '{}' follow", "client.jobs.list_runs_outputs( result.job_id, result.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns", "``'zip'``, or ``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns a file", "to use as the distkey for the table. sortkey1 :", "file to a Civis table \"\"\" client = client or", "if kwargs.get('header') is False else True with TemporaryDirectory() as tmp_dir:", "types is performed and each row will be a list", "INT, VARCHAR, DECIMAl) rather than that # they have the", "import path import io import logging import os import shutil", "this database. Can be the database name or ID. use_pandas", "list of column names. Column SQL transformations are possible. If", "file to have. Default: ``None``. polling_interval : int or float,", "from the import before failing. existing_table_rows : str, optional The", "errors. delimiter: str The provided value for the file delimiter.", "\"\"\"Store results of a query to a Civis file Parameters", "method for retrieving the data. 
The first execution of the", "(%s)', outputs[0][\"output_name\"], file_id) if use_pandas: # allows users to enter", "client=client) txt = io.TextIOWrapper(buf, encoding='utf-8') txt.seek(0) unload_manifest = json.load(txt) return", "delimiter=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Upload a `pandas` `DataFrame` into", "ImportError: from cStringIO import StringIO try: import pandas as pd", "\"my_database\") >>> file_id = fut.result()['output'][0][\"file_id\"] See Also -------- civis.io.read_civis :", "[] for fid in file_ids: cleaner_job = client.files.post_preprocess_csv( file_id=fid, in_place=False,", "max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None,", "**kwargs)) return data @deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename, sql, database, job_name=None,", "from staging table to final table after a brief delay,", "Not creating file %s.\" % (job_id, run_id, filename), RuntimeWarning) return", "to a Civis file Parameters ---------- sql : str The", "table_exists = True except ValueError: table_exists = False # Use", "in the returned dictionary containing a list of column names.", "a SQL query into memory. civis.io.civis_to_csv : Write directly to", "client is None: client = APIClient(api_key=api_key) db_id = client.get_database_id(database) credential_id", "Parameters ---------- df : :class:`pandas:pandas.DataFrame` The `DataFrame` to upload to", "None\") select = \", \".join(columns) if columns is not None", "this job will not appear in the Civis UI. Returns", "# is different which would introduce a breaking change headers", "or do not.') if delimiter != detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter", "these values for the data types provided, and decreases the", "`CivisFuture` object. Notes ----- This reads the contents of `filename`", "optional The second column in a compound sortkey for the", "base type was {}, but expected {}'.format( idx, fcol_base_type, tcol_base_type", "use_pandas : bool, optional If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,", "cleaning need_table_columns = ((not table_exists or existing_table_rows == 'drop') and", "without SQL. civis.io.civis_to_csv : Write directly to a CSV file.", "data at scale. headers = _get_headers(client, sql, db_id, credential_id, polling_interval)", "\"upsert\", delayed executions move data from staging table to final", "the database name or ID. use_pandas : bool, optional If", "sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", delimiter=None, headers=None, credential_id=None,", "False csv_settings = dict(include_header=include_header, compression='gzip') script_id, run_id = _sql_script(client, sql,", "file_columns: List[Dict[str, str]] The columns detected by the Civis API", "schema.table\" >>> fut = civis_to_csv(\"file.csv\", sql, \"my_database\") >>> fut.result() #", "contains headers. credential_id : str or int, optional The ID", "expected attributes. 
\"\"\" if headers != detected_info['includeHeader']: raise CivisImportError('Mismatch between", "- skipping column ' 'detection'.format(table=table)) table_exists = True except ValueError:", "CivisImportError( 'Encountered the following errors for file {}:\\n\\t{}'.format( output_obj_id, '\\n\\t'.join(error_msgs)", "compression, delimiter, cleaned_table_columns) = _process_cleaning_results( cleaning_futures, client, headers, need_table_columns, delimiter", "= True if headers is None else False csv_settings =", "headers = _get_headers(client, sql, db_id, credential_id, polling_interval) # include_header defaults", "as headers. The default, ``None``, attempts to autodetect whether or", "json import concurrent.futures import csv from os import path import", "reduce I/O csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False)", "include_header=headers, column_delimiter=delimiter, hidden=hidden ) cleaning_futures.append(run_job(cleaner_job.id, client=client, polling_interval=polling_interval)) return cleaning_futures def", "if len(schema_name_tup) != 2: raise ValueError(\"Cannot parse schema and table.", "compared to provided values. Parameters ---------- detected_info: Dict[str, Any] The", "after the sql query has completed and the result has", "d.unconsumed_tail + chunk if write_bytes: buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk =", "existing_table_rows is \"upsert\", delayed executions move data from staging table", "Also -------- civis.APIClient.scripts.post_sql \"\"\" if client is None: client =", "the contents of one or more Civis files to a", "client.default_credential csv_settings = csv_settings or {} export_job = client.scripts.post_sql(job_name, remote_host_id=db_id,", "hidden=hidden) return fut @deprecate_param('v2.0.0', 'file_id') def civis_file_to_table(file_id, database, table, client=None,", "This may be the case if their columns have different", "Default: ``None``. polling_interval : int or float, optional Number of", "= [row[col_a_index] for row in data] >>> df = read_civis(\"schema.table\",", ">>> fut = export_to_civis_file(sql, \"my_database\") >>> file_id = fut.result()['output'][0][\"file_id\"] See", "file with no compression unless include_header is set to False.", "= b'' delimiter = DELIMITERS.get(delimiter) if not delimiter: raise ValueError(\"delimiter", "run %d of SQL script %d', run_job.id, export_job.id) return export_job.id,", "name combined with a \".\", or else a single table", "_kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data = pd.read_csv(url, **_kwargs) else: response", "from civis import APIClient from civis._utils import maybe_get_random_name from civis.base", "import_name = 'CSV import to {}.{}'.format(schema, table) import_job = client.imports.post_files_csv(", "reader = csv.reader(StringIO(str(table)), delimiter=\".\", doublequote=True, quotechar='\"') schema_name_tup = next(reader) if", "_replace_null_column_names(table_columns) return cleaned_file_ids, headers, compression, delimiter, table_columns def _check_column_types(table_columns, file_columns,", "Future. Thus it is necessary to account # for these", "# their precisions/lengths - setting this option will allow the", "The ``schema`` may be None if the input is only", "column \"name\" and \"sqlType\". 
The import will only copy these", "if delimiter != detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter \"{}\" does not", "table, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None,", "via both files endpoint IDs and presigned S3 urls. Parameters", "Whether or not to quote fields. Default: ``False``. prefix: str,", "Default: ``True``. compression: str, optional Type of compression to use,", "list of the columns indicating a record has been updated.", "this file. database : str or int Upload data into", "strings. The ``schema`` may be None if the input is", "all results from files are correctly accounted for - #", "fut = query_civis(sql, database, client=client, credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns']", "mutating input arguments new_col = dict(col) if new_col.get('name') is None:", "row in data] Notes ----- This reads the data into", "in a Civis file \"\"\" if archive: warnings.warn(\"`archive` is deprecated", "to reduce I/O csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None,", "into :func:`python:csv.reader` if `use_pandas` is ``False``. Returns ------- data :", "response.raw for a more consistent approach # if content-encoding is", "if headers is None else False csv_settings = dict(include_header=include_header, compression='gzip')", "'schema.tablename' string Remember that special characters (such as '.') can", "client or APIClient() script_id, run_id = _sql_script(client=client, sql=sql, database=database, job_name=job_name,", ">>> col_a = df[\"column_a\"] See Also -------- civis.io.read_civis_sql : Read", "once to retrieve the data. This is done to use", "columns are exported. use_pandas : bool, optional If ``True``, return", "a single round of cleaning results as compared to provided", "output file # with headers prepended to it due to", "not the first row contains headers. primary_keys: list[str], optional A", "quote fields. Default: ``False``. polling_interval : int or float, optional", "data.pop(0) >>> col_a_index = columns.index(\"column_a\") >>> col_a = [row[col_a_index] for", "while chunk or d.unused_data: if d.unused_data: to_decompress = d.unused_data +", "between checks for job completion. archive : bool, optional (deprecated)", "be incompatible for import. This may be the case if", "columns=columns) >>> columns = data.pop(0) >>> col_a_index = columns.index(\"column_a\") >>>", "is true regardless of whether the destination database itself requires", "the data types provided, and decreases the # risk of", "a Civis file \"\"\" if archive: warnings.warn(\"`archive` is deprecated and", "str Delimiter that separates the cells. Examples -------- >>> sql", "is required. escaped: bool, optional A boolean value indicating whether", "Examples -------- >>> file_id = 100 >>> fut = civis.io.civis_file_to_table(file_id,", "source file. Each dictionary should have keys for column \"name\"", "client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers,", "as part of the `table` input.\") db_id = client.get_database_id(database) cred_id", "delimiter, compression, output_file_id): \"\"\"Check a single round of cleaning results", "by the Civis API. 
headers: bool The provided value for", "Civis table. The `DataFrame`'s index will not be included. To", "client is None: client = APIClient(api_key=api_key) if archive: warnings.warn(\"`archive` is", "as fout: fout.write(headers) with open(local_path, 'ab') as fout: shutil.copyfileobj(response.raw, fout,", "hidden=True, csv_settings=None): \"\"\"Store results of a query to a Civis", "with gzip.open(local_path, 'wb') as fout: fout.write(headers) with open(local_path, 'ab') as", "% (job_id, run_id, filename), RuntimeWarning) return else: url = outputs[0][\"path\"]", "(such as '.') can only be included in a schema", "or table name if delimited by double-quotes. Parameters ---------- table:", "detected_info[\"columnDelimiter\"]) ) if compression != detected_info['compression']: raise CivisImportError('Mismatch between detected", "SQL query into memory. civis.io.civis_to_csv : Write directly to a", "bool, optional If ``True`` include a key in the returned", "\"\"\"Read data from a Civis table. Parameters ---------- table :", "name will be used. api_key : DEPRECATED str, optional Your", "query into memory. civis.io.civis_to_csv : Write directly to a CSV", "has completed and the result has been stored as a", "client=None, credential_id=None, include_header=True, compression='none', delimiter='|', max_file_size=None, unquoted=False, prefix=None, polling_interval=None, hidden=True):", "= DELIMITERS.get(delimiter) assert delimiter, \"delimiter must be one of {}\".format(", "A list of the primary key column(s) of the destination", "completed file cleaning - other files will be # compared", "`use_pandas` is ``False``. Returns ------- data : :class:`pandas:pandas.DataFrame` or list", "has been stored as a Civis file. Examples -------- >>>", "= CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) if archive: def", "def _get_sql_select(table, columns=None): if columns and not isinstance(columns, (list, tuple)):", "string to be executed. database : str or int Export", "``True``, the first line of the CSV will be headers.", "for now # when gzip compression is requested, a gzip", "That done contains more than one Future. Thus it is", "provide a single output file # with headers prepended to", ">>> sql = \"SELECT * FROM schema.table\" >>> fut =", "first_completed.job_id, first_completed.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if", "Thus it is necessary to account # for these possible", "Dict[str, Any] The detected info of the file as returned", "is ``False``, no parsing of types is performed and each", "def _check_all_detected_info(detected_info, headers, delimiter, compression, output_file_id): \"\"\"Check a single round", "Raises ------ CivisImportError If the values detected on the file", "result has been stored as a Civis file. Examples --------", "manifest file to conventional S3 UNLOAD statements except the CSV", "manifest file. Has the following keys: 'query': str The query.", "database name or ID. table : str The schema and", "header as first row) if `use_pandas` is ``False``, otherwise a", "write headers as gzip if compression == 'gzip': with gzip.open(local_path,", "UI. 
Returns ------- unload_manifest: dict A dictionary resembling an AWS", "stream=True) response.raise_for_status() # gzipped buffers can be concatenated so write", "and provided ' 'compressions - provided compression was {}' '", "\" \"Use `hidden` instead.\", FutureWarning) if client is None: client", "is None: # Instantiate client here in case users provide", "headers, compression) return callback def split_schema_tablename(table): \"\"\"Split a Redshift 'schema.tablename'", "delimiter is None: delimiter = detected_info['columnDelimiter'] compression = detected_info['compression'] _check_all_detected_info(detected_info,", "still_going): output_file = client.jobs.list_runs_outputs( result.job_id, result.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info", "sql, database, credential_id, polling_interval=None): headers = None try: # use", "credential will be used. polling_interval : int or float, optional", "run_id, filename, headers, compression) fut.add_done_callback(download) if archive: def f(x): return", "'ensure all imported files have the same ' 'compression.'.format( compression,", "``','``. unquoted: bool, optional Whether or not to quote fields.", "\"immediate\" One of \"delayed\" or \"immediate\". If \"immediate\", refresh column", "write_bytes: buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def _download_file(url, local_path,", "d.unused_data: to_decompress = d.unused_data + chunk d = zlib.decompressobj(zlib.MAX_WBITS |", "data into memory. See Also -------- civis.io.read_civis : Read directly", "entry in manifest['entries']] >>> buf = BytesIO() >>> civis_to_file(ids[0], buf)", "force_multifile=True, max_file_size=max_file_size) script_id, run_id = _sql_script(client, sql, database, job_name, credential_id,", "EmptyResultError, CivisImportError from civis.futures import CivisFuture from civis.io import civis_to_file,", "whether or not the source file has quotes escaped with", "to. E.g., ``'scratch.table'``. client : :class:`civis.APIClient`, optional If not provided,", ") # Set values from first completed file cleaning -", "to use a more performant method for retrieving the data.", "delimiter='|', max_file_size=None, unquoted=False, prefix=None, polling_interval=None, hidden=True): \"\"\"Unload the result of", ": list, optional A list of column names. Column SQL", "execution=\"immediate\", delimiter=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Upload a `pandas` `DataFrame`", "@deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename, database, table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\",", "civis._deprecation import deprecate_param import requests try: from io import StringIO", "multiple concurrent imports to the same destination table. credential_id :", "the column names in the Civis Table. credential_id : str", "headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut", "None else False csv_settings = dict(include_header=include_header, compression='gzip') script_id, run_id =", "delimiter, \"delimiter must be one of {}\".format( DELIMITERS.keys() ) try:", "int Upload data into this database. 
Can be the database", "need_table_columns, headers, delimiter, hidden, polling_interval=None): cleaning_futures = [] for fid", "hidden=True, polling_interval=None): \"\"\"Export data from Civis to a local CSV", "import StringIO except ImportError: from cStringIO import StringIO try: import", "are accessible via both files endpoint IDs and presigned S3", "as fout: shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) # write headers and decompress", "pandas as pd NO_PANDAS = False except ImportError: NO_PANDAS =", "def _get_headers(client, sql, database, credential_id, polling_interval=None): headers = None try:", "URL ('https://...') 'unquoted': bool Whether the cells are quoted. 'compression':", "dict(schema=schema, table=table, remote_host_id=db_id, credential_id=cred_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey, sortkeys=[sortkey1,", "data from a Civis table. Parameters ---------- table : str", "ValueError(\"Provide a schema as part of the `table` input.\") db_id", "def _download_callback(job_id, run_id, filename, headers, compression): def callback(future): if not", "are correctly accounted for - # Since concurrent.futures.wait returns two", "CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) return fut @deprecate_param('v2.0.0', 'api_key')", "to be incompatible for import. This may be the case", "_decompress_stream(response, fout) # decompress the stream, write headers, and zip", "not outputs: raise EmptyResultError(\"Query {} returned no output.\" .format(script_id)) url", "-------- :func:`~pandas.DataFrame.to_csv` \"\"\" if client is None: client = APIClient(api_key=api_key)", "column delimiter. One of ``','``, ``'\\\\t'`` or ``'|'``. headers :", "imports to the same destination table. delimiter : string, optional", "use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read", "line of the CSV will be headers. Default: ``True``. compression:", "'w') as fout: arcname = path.basename(local_path) if arcname.split('.')[-1] == 'zip':", "periods must be double quoted, e.g. ``'scratch.\"my.table\"'``. api_key : DEPRECATED", "to accommodate multiple concurrent imports to the same destination table.", "if compression == 'gzip': with gzip.open(local_path, 'wb') as fout: fout.write(headers)", "string. The custom SQL string will be executed twice; once", "Table. credential_id : str or int, optional The ID of", "or not the first row contains headers. credential_id : str", "file %s', file_id) return _download_file(url, filename, headers, compression) return callback", "= True except ValueError: table_exists = False # Use Preprocess", "retrieve headers and once to retrieve the data. This is", "[1, 2, 3], 'b': [4, 5, 6]}) >>> fut =", "source file(s) escape quotes with a backslash. Defaults to false.", "provided value for the file delimiter. compression: str The provided", "headers=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", credential_id=None, polling_interval=None, archive=False, hidden=True): \"\"\"Upload", "will change in v2.0.0. database : str or int Upload", "credential_id or client.default_credential csv_settings = csv_settings or {} export_job =", "object. Examples -------- >>> sql = \"SELECT * FROM schema.table\"", "list[str], optional A list of the columns indicating a record", "DELETE, etc.). 
Parameters ---------- filename : str Download exported data", ">>> table = \"schema.table\" >>> database = \"my_data\" >>> columns", "compression unless include_header is set to False. In a future", "compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id = _sql_script(client, sql,", "'entries': list of dict Each dict has the following keys:", "delimiter=delimiter) >>> ids = [entry['id'] for entry in manifest['entries']] >>>", "to {}.{}'.format(schema, table) import_job = client.imports.post_files_csv( source, destination, headers, name=import_name,", "return fut def _sql_script(client, sql, database, job_name, credential_id, hidden=False, csv_settings=None):", "tuple)): raise TypeError(\"columns must be a list, tuple or None\")", "list, optional A list of column names. Column SQL transformations", "\"\"\" if headers != detected_info['includeHeader']: raise CivisImportError('Mismatch between detected headers", "results of a SQL query into memory. civis.io.civis_to_csv : Write", "if delimited by double-quotes. Parameters ---------- table: str Either a", "consistent approach # if content-encoding is specified in the headers", "etc.). Parameters ---------- filename : str Download exported data into", "polling_interval) # include_header defaults to True in the API. include_header", "of column names. Default: ``True``. compression: str, optional Type of", "by double-quotes. Parameters ---------- table: str Either a Redshift schema", "is inconsistent chunk = response.raw.read(CHUNK_SIZE) d = zlib.decompressobj(zlib.MAX_WBITS | 32)", "dict, optional A dictionary of csv_settings to pass to :func:`civis.APIClient.scripts.post_sql`.", "import_job.id) return fut def _sql_script(client, sql, database, job_name, credential_id, hidden=False,", "detected headers - ' 'please ensure all imported files either", "[file_id] if schema is None: raise ValueError(\"Provide a schema as", "str Unsigned S3 URL ('s3://...') 'url_signed': str Signed S3 URL", "is \"upsert\", delayed executions move data from staging table to", "raise CivisImportError( 'Encountered the following errors for file {}:\\n\\t{}'.format( output_obj_id,", "most scenarios this will greatly # reduce the work that", "of `df` as the first argument to this function. Parameters", "Since concurrent.futures.wait returns two sets, it is possible # That", "OVER(ORDER BY date) AS order\"] >>> data = read_civis(table, database,", "Read directly into memory without SQL. civis.io.civis_to_csv : Write directly", "sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval,", "distkey : str, optional The column to use as the", "a backslash. Defaults to false. execution: string, optional, default \"immediate\"", "files should have the same number of ' 'columns. Expected", "same number of ' 'columns. Expected {} columns but file", "columns) data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id, polling_interval=polling_interval,", "which returns the response from :func:`civis.APIClient.scripts.get_sql_runs` after the sql query", "or None\") select = \", \".join(columns) if columns is not", "`use_pandas` is ``True`` and `pandas` is not installed. Examples --------", "the Civis Table. 
credential_id : str or int, optional The", "-------- >>> file_id = 100 >>> fut = civis.io.civis_file_to_table(file_id, ...", "approach # if content-encoding is specified in the headers #", "value indicating whether or not the source file has quotes", "required. Note that this is true regardless of whether the", "Megabytes each created file will be. unquoted: bool, optional Whether", "delimited by double-quotes. Parameters ---------- table: str Either a Redshift", "detected_info['includeHeader']: raise CivisImportError('Mismatch between detected headers - ' 'please ensure", ">>> col_a = [row[col_a_index] for row in data] >>> df", "output.\" .format(script_id)) url = outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported results", "the source file(s) escape quotes with a backslash. Defaults to", "\"ROW_NUMBER() OVER(ORDER BY date) AS order\"] >>> data = read_civis(table,", "be one of {}\".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted,", "are provided, this limit applies across all files combined. existing_table_rows", "def _check_column_types(table_columns, file_columns, output_obj_id): \"\"\"Check that base column types match", "columns from file cleaning with an appropriately blank column name.", "= outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported results to Civis file", "24 hours. In addition, if existing_table_rows is \"upsert\", delayed executions", "schema_name_tup = (None, schema_name_tup[0]) if len(schema_name_tup) != 2: raise ValueError(\"Cannot", "cleaned_file_ids = [] done, still_going = concurrent.futures.wait( cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED )", "credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data from a Civis", "{} columns'.format( len(table_columns), output_obj_id, len(file_columns)) ) error_msgs = [] for", "str Download exported data into this file. sql : str", "the import before failing. existing_table_rows : str, optional The behaviour", "existing_table_rows is \"upsert\", this field is required. escaped: bool, optional", "be filled. Raises ------ ValueError If the input ``table`` is", "and table name combined with a \".\", or else a", "Civis table \"\"\" client = client or APIClient() script_id, run_id", "type (e.g. INT, VARCHAR, DECIMAl) rather than that # they", "import_job = client.imports.post_files_csv( source, destination, headers, name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter,", "escaped: bool, optional A boolean value indicating whether or not", "to it due to how distributed databases export # data", "= \"SELECT * FROM schema.table\" >>> df = read_civis_sql(sql, \"my_database\",", "as it uses a 'PARALLEL ON' S3 unload. It returns", "df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})", "A boolean value indicating whether or not the source file", "than that # they have the same precision and length", "used. primary_keys: list[str], optional A list of the primary key", "versions >= 1.11 and will be removed in v2.0. Tables", "of the file should be treated as headers. The default,", "This reads the data into memory. 
See Also -------- civis.io.read_civis", "= next(reader) if len(schema_name_tup) == 1: schema_name_tup = (None, schema_name_tup[0])", ") if compression != detected_info['compression']: raise CivisImportError('Mismatch between detected and", "df = pd.read_csv(buf, delimiter=delimiter) See Also -------- civis.APIClient.scripts.post_sql \"\"\" if", "= DELIMITERS.get(delimiter) if not delimiter: raise ValueError(\"delimiter must be one", "uniquely identify a record. If existing_table_rows is \"upsert\", this field", "include_header defaults to True in the API. include_header = True", "the result has been stored as a Civis file. Examples", "not the first row contains headers. This parameter has no", "list[dict] \"\"\" new_cols = [] for i, col in enumerate(column_list):", "S3 URL ('https://...') 'unquoted': bool Whether the cells are quoted.", "Civis API. headers: bool The provided value for whether or", "or not the first row contains headers. This parameter has", "output_obj_id): \"\"\"Check that base column types match those current defined", "the primary key column(s) of the destination table that uniquely", "read only; select * from ({}) limit 1'.format(sql) fut =", "credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns'] except Exception as exc: #", "was {}' ' but detected compression {}. Please ' 'ensure", "match. Examples -------- >>> file_id = 100 >>> fut =", "same destination table. delimiter : string, optional The column delimiter.", "delimiter. compression: str The provided value for the file compression.", "file_to_civis, query_civis from civis.utils import run_job from civis._deprecation import deprecate_param", "civis_to_csv(filename, sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter=',',", "optional If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise, return a list", "fut.result()[\"output\"] if not outputs: raise EmptyResultError(\"Unload query {} returned no", "against this database. Can be the database name or ID.", "str or int, optional The database credential ID. If ``None``,", "in the Civis Table. credential_id : str or int, optional", "be treated as headers. The default, ``None``, attempts to autodetect", "the case if their columns have different types, their delimiters", "dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If multiple files are being", "%d', fut.run_id, import_job.id) return fut def _sql_script(client, sql, database, job_name,", "buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def _download_file(url, local_path, headers, compression): response", "name, but the ``tablename`` will always be filled. Raises ------", "delimiter, hidden, polling_interval=None): cleaning_futures = [] for fid in file_ids:", "of a local CSV file to Civis. Parameters ---------- filename", "civis_to_multifile_csv(sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter='|', max_file_size=None,", "unload_manifest = json.load(txt) return unload_manifest @deprecate_param('v2.0.0', 'api_key', 'headers') def dataframe_to_civis(df,", "json.load(txt) return unload_manifest @deprecate_param('v2.0.0', 'api_key', 'headers') def dataframe_to_civis(df, database, table,", "not provided, will attempt to auto-detect. headers : bool, optional", ": Write directly to csv. 
civis.io.export_to_civis_file : Store a SQL", "optional If ``True`` (the default), this job will not appear", "columns in the table. delimiter : string, optional The column", "civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2,", "Write directly to a CSV file. \"\"\" if client is", "... 'scratch.df_table') >>> fut.result() See Also -------- :func:`~pandas.DataFrame.to_csv` \"\"\" if", "------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples -------- >>>", "db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential # Try", "their columns have different types, their delimiters are different, headers", "completes. hidden : bool, optional If ``True`` (the default), this", "the database name or ID. table : str The schema", "credential. If ``None``, the default credential will be used. polling_interval", "headers as gzip if compression == 'gzip': with gzip.open(local_path, 'wb')", "query. 'header': list of str The columns from the query.", "be the database name or ID. job_name : str, optional", "_download_callback(script_id, run_id, filename, headers, compression) fut.add_done_callback(download) if archive: def f(x):", "which # are still running. for result in concurrent.futures.as_completed(done |", "``'scratch.table'``. Schemas or tablenames with periods must be double quoted,", "will be loaded as an atomic unit in parallel, and", "special characters (such as '.') can only be included in", "included in a schema or table name if delimited by", "will not appear in the Civis UI. **kwargs : kwargs", "VARCHAR, DECIMAl) rather than that # they have the same", "if `use_pandas` is ``False``, no parsing of types is performed", "database. E.g. ``'my_schema.my_table'``. Schemas or tablenames with periods must be", "script_id, run_id = _sql_script(client=client, sql=sql, database=database, job_name=job_name, credential_id=credential_id, csv_settings=csv_settings, hidden=hidden)", "of ``','``, ``'\\\\t'`` or ``'|'``. headers : bool, optional Whether", "# reduce the work that Platform does to provide a", "import StringIO try: import pandas as pd NO_PANDAS = False", "job to complete See Also -------- civis.io.read_civis : Read table", "' 'columns. Expected {} columns but file {} ' 'has", "= credential_id or client.default_credential # don't fix bug that would", "credential_id, polling_interval) # include_header defaults to True in the API.", "db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential csv_settings =", "of table, including schema, in the database. E.g. ``'my_schema.my_table'``. Schemas", "[\"column_a\", \"ROW_NUMBER() OVER(ORDER BY date) AS order\"] >>> data =", "executions move data from staging table to final table after", "-------- civis.io.read_civis : Read directly into memory without SQL. 
civis.io.civis_to_csv", "'{}' follow the pattern 'schema.table'?\" .format(table)) return tuple(schema_name_tup) def _replace_null_column_names(column_list):", "that special characters (such as '.') can only be included", "tcol_base_type = tcol['sql_type'].split('(', 1)[0] fcol_base_type = fcol['sql_type'].split('(', 1)[0] if tcol_base_type", "= APIClient() schema, table = split_schema_tablename(table) if isinstance(file_id, int): file_id", "else None) if headers is None: headers = detected_info['includeHeader'] if", "{}\".format( DELIMITERS.keys() ) try: client.get_table_id(table, database) log.debug('Table {table} already exists", "seconds to wait between checks for query completion. archive :", "import requests try: from io import StringIO except ImportError: from", "job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter=',', unquoted=False, archive=False, hidden=True,", ">>> fut.result() # Wait for job to complete See Also", "field is required. escaped: bool, optional A boolean value indicating", "their own names parameter _kwargs = {'names': headers} _kwargs.update(kwargs) _kwargs['compression']", "'compressions - provided compression was {}' ' but detected compression", "part of the `table` input.\") db_id = client.get_database_id(database) cred_id =", "schema, tablename A 2-tuple of strings. The ``schema`` may be", "dict(col) if new_col.get('name') is None: new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col) return", "and return presigned urls. This function is intended for unloading", "default credential will be used. include_header: bool, optional If ``True``,", "= _download_callback(script_id, run_id, filename, headers, compression) fut.add_done_callback(download) if archive: def", "upload to. E.g., ``'scratch.table'``. Schemas or tablenames with periods must", "dictionaries corresponding to the columns in the source file. Each", "(None, schema_name_tup[0]) if len(schema_name_tup) != 2: raise ValueError(\"Cannot parse schema", "E.g. ``'my_schema.my_table'``. Schemas or tablenames with periods must be double", "= 'none' # don't support parallel unload; the output format", "... 'scratch.my_data') >>> fut.result() \"\"\" if client is None: client", "``'append'``, ``'drop'``, or ``'upsert'``. Defaults to ``'fail'``. diststyle : str,", "the distkey for the table. sortkey1 : str, optional The", "optional Whether or not the first row of the file", "if schema is None: raise ValueError(\"Provide a schema as part", "Parameters ---------- filename : str Download exported data into this", "with zipfile.ZipFile(local_path, 'w') as fout: arcname = path.basename(local_path) if arcname.split('.')[-1]", "fout, CHUNK_SIZE) # write headers and decompress the stream elif", ">>> fut.result() \"\"\" if client is None: client = APIClient()", "local_path, headers, compression): response = requests.get(url, stream=True) response.raise_for_status() # gzipped", "The column delimiter. One of ``','``, ``'\\\\t'`` or ``'|'``. headers", "If ``None``, the default credential will be used. primary_keys: list[str],", "import gzip import zipfile from civis import APIClient from civis._utils", "has been updated. 
If existing_table_rows is \"upsert\", this field is", "read_civis_sql(sql, database, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True,", "fix bug that would cause breaking change for now #", ">>> file_id = 100 >>> fut = civis.io.civis_file_to_table(file_id, ... 'my-database',", "The detected info of the file as returned by the", "---------- sql : str The SQL select string to be", "import warnings import zlib import gzip import zipfile from civis", "type checking, we care only that the types # share", "or ``'gzip'``. Default ``'none'``. delimiter: str, optional Which delimiter to", "destination, headers, name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter, compression=compression, escaped=escaped, execution=execution, loosen_types=loosen_types,", "into :func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or passed into :func:`python:csv.reader`", "for the table. sortkey1 : str, optional The column to", "CivisFuture from civis.io import civis_to_file, file_to_civis, query_civis from civis.utils import", "a gzip file is not actually returned # instead the", "be included. To store the index along with the other", "a compound sortkey for the table. table_columns : list[Dict[str, str]],", "# Since concurrent.futures.wait returns two sets, it is possible #", "arguments will be passed to :meth:`pandas:pandas.DataFrame.to_csv`. Returns ------- fut :", "list[dict] the list of columns from file cleaning. Returns --------", "wait between checks for query completion. archive : bool, optional", "table to be created. file_columns: List[Dict[str, str]] The columns detected", "query completion. hidden : bool, optional If ``True`` (the default),", "types # share a base type (e.g. INT, VARCHAR, DECIMAl)", "Reference by name to this argument is deprecated, as the", "query to a Civis file Parameters ---------- sql : str", "log.debug('Started run %d of SQL script %d', run_job.id, export_job.id) return", "None: # Instantiate client here in case users provide a", "`pandas` is not installed. Examples -------- >>> table = \"schema.table\"", "their expected attributes. \"\"\" if headers != detected_info['includeHeader']: raise CivisImportError('Mismatch", "client=client, poll_on_creation=False) outputs = fut.result()[\"output\"] if not outputs: raise EmptyResultError(\"Unload", "client = client or APIClient() script_id, run_id = _sql_script(client=client, sql=sql,", "Civis table. All provided files will be loaded as an", "separable into a schema and table name. \"\"\" reader =", "SQL select string to be executed. database : str or", "credential_id, hidden=hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client,", "such that changes in state cannot occur (e.g., INSERT, UPDATE,", "output format # is different which would introduce a breaking", "data from staging table to final table after a brief", "of {}\".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True,", "Returns -------- column_list: list[dict] \"\"\" new_cols = [] for i,", "for row in data] >>> df = read_civis(\"schema.table\", \"my_data\", use_pandas=True)", "Civis file. Examples -------- >>> sql = \"SELECT * FROM", ">>> fut = civis.io.dataframe_to_civis(df, 'my-database', ... 
'scratch.df_table') >>> fut.result() See", "copy these columns regardless if there are more columns in", "%s\", str(exc)) return headers def _decompress_stream(response, buf, write_bytes=True): # use", "`hidden` instead.\", FutureWarning) db_id = client.get_database_id(database) credential_id = credential_id or", "`hidden` instead.\", FutureWarning) headers = False if kwargs.get('header') is False", "' 'compressions - provided compression was {}' ' but detected", "error_msgs = [] for idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)):", "to account # for these possible completed cleaning runs while", "fut.run_id, import_job.id) return fut def _sql_script(client, sql, database, job_name, credential_id,", "fut.add_done_callback(f) return fut @deprecate_param('v2.0.0', 'api_key') def civis_to_multifile_csv(sql, database, job_name=None, api_key=None,", "\"\"\"Unload the result of SQL query and return presigned urls.", "credential_id=credential_id, polling_interval=polling_interval, archive=archive, hidden=hidden, **kwargs) return data def export_to_civis_file(sql, database,", "containing a list of column names. Default: ``True``. compression: str,", ">>> database = \"my_data\" >>> columns = [\"column_a\", \"ROW_NUMBER() OVER(ORDER", "client is None: # Instantiate client here in case users", "to True in the API. include_header = True if headers", "In most scenarios this will greatly # reduce the work", "If ``None``, the default credential will be used. include_header: bool,", "buf = io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt = io.TextIOWrapper(buf, encoding='utf-8')", "argument to this function. Parameters ---------- df : :class:`pandas:pandas.DataFrame` The", "%s does not have any output to \" \"download. Not", "a table with the requested name already exists. One of", "number of rows with errors to remove from the import", "the contents of a local CSV file to Civis. Parameters", "columns in the table. headers : bool, optional [DEPRECATED] Whether", "**kwargs : kwargs Extra keyword arguments are passed into :func:`pandas:pandas.read_csv`", "the Civis API # to increase these values for the", "object will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str", "client is None: client = APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter) assert", "and table you want to upload to. E.g., ``'scratch.table'``. client", "delimiter \"{}\" does not match ' 'detected delimiter for {}:", "any. One of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. ``'gzip'``", "run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) if archive: def f(x): return client.scripts.put_sql_archive(script_id,", "FROM schema.table\" >>> fut = civis_to_csv(\"file.csv\", sql, \"my_database\") >>> fut.result()", "would introduce a breaking change headers = b'' delimiter =", "for the table. sortkey2 : str, optional The second column", "'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis', 'civis_file_to_table', 'split_schema_tablename', 'export_to_civis_file'] DELIMITERS = { ',':", "performant method for retrieving the data. The first execution of", "is required. Note that this is true regardless of whether", "\" \"Use `hidden` instead.\", FutureWarning) headers = False if kwargs.get('header')", "Notes ----- This reads the contents of `filename` into memory.", "to :func:`~pandas.DataFrame.to_csv`) to modify the column names in the Civis", "used. 
include_header: bool, optional If ``True`` include a key in", "------ ValueError If the input ``table`` is not separable into", "last_modified_keys: list[str], optional A list of the columns indicating a", "value indicating whether or not the source file(s) escape quotes", "detected_info: Dict[str, Any] The detected info of the file as", "One of \"delayed\" or \"immediate\". If \"immediate\", refresh column statistics", "more columns in the table. delimiter : string, optional The", "attributes. \"\"\" if headers != detected_info['includeHeader']: raise CivisImportError('Mismatch between detected", "csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) outputs", "headers, compression): def callback(future): if not future.succeeded(): return outputs =", ":class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. max_errors :", ": Read table contents into memory. civis.io.read_civis_sql : Read results", "or not the first row of the file should be", "primary key. last_modified_keys: list[str], optional A list of the columns", "in v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) headers = False", "delimiter=None, headers=None, credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload the contents of one", "there might be differences in # their precisions/lengths - setting", "schema.table\" >>> df = read_civis_sql(sql, \"my_database\", use_pandas=True) >>> col_a =", "\" \"Use `hidden` instead.\", FutureWarning) name = path.basename(filename) with open(filename,", "# If multiple files are being imported, there might be", "upload to Civis. database : str or int Upload data", "_check_column_types(table_columns, file_columns, output_obj_id): \"\"\"Check that base column types match those", "If ``True`` (the default), this job will not appear in", "file ID under consideration; used for error messaging. Raises ------", "civis.io.read_civis : Read directly into memory without SQL. civis.io.read_civis_sql :", "will always be written with column names read from the", "= read_civis_sql(sql, \"my_database\") >>> columns = data.pop(0) >>> col_a_index =", "tmp_file.write(headers) _decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path, 'w') as fout: arcname =", "files are correctly accounted for - # Since concurrent.futures.wait returns", "SQL transformations are possible. If omitted, all columns are exported.", "CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) outputs = fut.result()[\"output\"] if", ":func:`civis.APIClient.scripts.get_sql_runs` after the sql query has completed and the result", "compressions do not match. Examples -------- >>> file_id = 100", "= query_civis(sql, database, client=client, credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns'] except", "first_completed.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if need_table_columns", "will be. unquoted: bool, optional Whether or not to quote", "not.') if delimiter != detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter \"{}\" does", "ImportError: NO_PANDAS = True CHUNK_SIZE = 32 * 1024 log", "columns in the source file. 
Each dictionary should have keys", "headers : bool, optional [DEPRECATED] Whether or not the first", "of ``'even'``, ``'all'`` or ``'key'``. distkey : str, optional The", "= None try: # use 'begin read only;' to ensure", "headers, name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter, compression=compression, escaped=escaped, execution=execution, loosen_types=loosen_types, table_columns=table_columns,", "double quoted, e.g. ``'scratch.\"my.table\"'``. api_key : DEPRECATED str, optional Your", "is None) cleaning_futures = _run_cleaning(file_id, client, need_table_columns, headers, delimiter, hidden)", "chunk or d.unused_data: if d.unused_data: to_decompress = d.unused_data + chunk", "are more columns in the table. primary_keys: list[str], optional A", "the table columns as needed # and perform necessary file", "for a deferred statistics update; column statistics may not be", "file ID or a list of Civis file IDs. Reference", "their precisions/lengths - setting this option will allow the Civis", "headers def _decompress_stream(response, buf, write_bytes=True): # use response.raw for a", "zlib.decompressobj(zlib.MAX_WBITS | 32) else: to_decompress = d.unconsumed_tail + chunk if", "arcname = path.basename(local_path) if arcname.split('.')[-1] == 'zip': arcname = arcname.split('.')[0]", "UNLOAD statements except the CSV parts are accessible via both", "cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED ) # Set values from first completed file", "use, if any. One of ``','``, ``'\\t'``, or ``'|'``. Default:", "hidden=hidden ) fut = run_job(import_job.id, client=client, polling_interval=polling_interval) log.debug('Started run %d", "name or ID. job_name : str, optional A name to", "decompress the stream, write headers, and zip the file elif", "!= detected_info['compression']: raise CivisImportError('Mismatch between detected and provided ' 'compressions", "%d of SQL script %d', run_job.id, export_job.id) return export_job.id, run_job.id", "as fout: arcname = path.basename(local_path) if arcname.split('.')[-1] == 'zip': arcname", "name if delimited by double-quotes. Parameters ---------- table: str Either", "must be one of {}\".format( DELIMITERS.keys() ) try: client.get_table_id(table, database)", "in state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters", ":envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The database credential", "None: client = APIClient() schema, table = split_schema_tablename(table) if isinstance(file_id,", "assert delimiter, \"delimiter must be one of {}\".format( DELIMITERS.keys() )", "CSV will be headers. Default: ``True``. compression: str, optional Type", "---------- file_id : int or list[int] Civis file ID or", "need_table_columns, delimiter ) table_columns = table_columns or cleaned_table_columns source =", "in enumerate(zip(table_columns, file_columns)): # for the purposes of type checking,", "credential_id=None, include_header=True, compression='none', delimiter=',', unquoted=False, archive=False, hidden=True, polling_interval=None): \"\"\"Export data", "delimiter, cleaned_table_columns) = _process_cleaning_results( cleaning_futures, client, headers, need_table_columns, delimiter )", "setting this option will allow the Civis API # to", "If ``True``, the first line of the CSV will be", "UI. Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples", "``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults to ``'fail'``. 
diststyle : str, optional
    The distribution style for the table. One of ``'even'``, ``'all'``
    or ``'key'``.
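An illustrative ``table_columns`` value, as referenced above (the column
names and SQL types here are placeholders, not from the original
docstring):

>>> file_id = 100
>>> table_columns = [{'name': 'id', 'sql_type': 'INT'},
...                  {'name': 'value', 'sql_type': 'VARCHAR(42)'}]
>>> fut = civis.io.civis_file_to_table(file_id, 'my-database',
...                                    'scratch.my_data',
...                                    table_columns=table_columns)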
The custom SQL string will be executed twice; once to attempt to
retrieve headers and once to retrieve the data. This is done to use a
more performant method for retrieving the data. The first execution of
the custom SQL is controlled such that changes in state cannot occur
(e.g., INSERT, UPDATE, DELETE, etc.).
it was provided as an argument
delimiter = DELIMITERS.get(delimiter)
assert delimiter, "delimiter must be one of {}".format(
    DELIMITERS.keys())
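# Sketch of the mapping above: the module-level DELIMITERS dict translates
# a literal delimiter character into the name the Civis API expects for
# ``column_delimiter``, e.g.
#     DELIMITERS.get(',')   -> 'comma'
#     DELIMITERS.get('\t')  -> 'tab'
#     DELIMITERS.get('|')   -> 'pipe'
#     DELIMITERS.get(';')   -> None  (then rejected by the assert above)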
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
api_key : DEPRECATED str, optional
    Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
    environment variable will be used.
archive : bool, optional (deprecated)
    If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
    If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
schema, tablename
    A 2-tuple of strings. The ``schema`` may be None if the input is a
    bare table name, but the ``tablename`` will always be filled.
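An illustrative sketch of the expected behaviour (the inputs here are
made-up names, not taken from the original docstring):

>>> split_schema_tablename('scratch.my_table')
('scratch', 'my_table')
>>> split_schema_tablename('my_table')
(None, 'my_table')
>>> split_schema_tablename('scratch."my.table"')
('scratch', 'my.table')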
Raises
------
CivisImportError
    If multiple files are given and determined to be incompatible for
    import.
If not provided, will attempt to auto-detect.
headers = _get_headers(client, sql, db_id, credential_id, polling_interval)
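# Note on the helper above: _get_headers wraps the user's query in
# 'select * from (...) limit 1' inside a read-only transaction, runs it
# via query_civis, and returns the reported result_columns, or None if
# header retrieval fails.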
This may be the case if their columns have different types, their
delimiters are different, headers are present in some but not others,
or compressions do not match.
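A sketch of handling this error (the file IDs and table names are
placeholders; ``CivisImportError`` is importable from ``civis.base``,
as in this module):

>>> from civis.base import CivisImportError
>>> try:
...     fut = civis.io.civis_file_to_table([100, 101], 'my-database',
...                                        'scratch.my_data')
... except CivisImportError:
...     pass  # files disagree on columns, delimiter, headers, or compression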
api_key : DEPRECATED str, optional
    Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
    environment variable will be used.
The `DataFrame`'s index will not be included. To store the index
along with the other values, use `df.reset_index()` instead of `df`
as the first argument to this function.
existing_table_rows : str, optional
    The behaviour if a table with the requested name already exists.
    One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
    ``'upsert'``. Defaults to ``'fail'``.
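An illustrative sketch of the `df.reset_index()` pattern and the
``existing_table_rows`` option described above (database and table
names are placeholders):

>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2]},
...                   index=pd.Index(['x', 'y'], name='id'))
>>> fut = civis.io.dataframe_to_civis(df.reset_index(), 'my-database',
...                                   'scratch.my_table',
...                                   existing_table_rows='drop')
>>> fut.result()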
output_obj_id: int
    The file ID under consideration; used for error messaging.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> fut = export_to_civis_file(sql, "my_database")
>>> file_id = fut.result()['output'][0]["file_id"]
output_obj_id: int
    The file ID under consideration; used for error messaging.
Examples -------- >>> file_id = 100 >>>", "Extra keyword arguments are passed into :func:`pandas:pandas.read_csv` if `use_pandas` is", "prefix: str, optional A user specified filename prefix for the", "stream, write headers, and zip the file elif compression ==", "A list of dictionaries corresponding to the columns in the", "``'scratch.table'``. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient`", ">>> sql = \"SELECT * FROM schema.my_big_table\" >>> database =", "ID. columns : list, optional A list of column names.", "of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns", "row will be a list of strings. Raises ------ ImportError", "return outputs = future.result().get(\"output\") if not outputs: warnings.warn(\"Job %s, run", "random job name will be used. api_key : DEPRECATED str,", "This parameter has no effect in versions >= 1.11 and", "rows (with header as first row) if `use_pandas` is ``False``,", ">>> col_a = [row[col_a_index] for row in data] Notes -----", "schema and table name. \"\"\" reader = csv.reader(StringIO(str(table)), delimiter=\".\", doublequote=True,", "col_a = [row[col_a_index] for row in data] Notes ----- This", "memory without SQL. civis.io.civis_to_csv : Write directly to a CSV", "the import before failing. If multiple files are provided, this", "for query completion. archive : bool, optional (deprecated) If ``True``,", "auto-detect. headers : bool, optional Whether or not the first", "_check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) # Ensure that all", "SQL string will be executed twice; once to attempt to", "Examples -------- >>> sql = \"SELECT * FROM schema.table\" >>>", "# however, our use of content-encoding is inconsistent chunk =", "to_decompress = d.unused_data + chunk d = zlib.decompressobj(zlib.MAX_WBITS | 32)", "credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data from Civis using", "as fout: fout.write(headers) _decompress_stream(response, fout) # decompress the stream, write", "different, headers are present in some but not others, or", "from ({}) limit 1'.format(sql) fut = query_civis(sql, database, client=client, credential_id=credential_id,", "file_id = fut.result()['output'][0][\"file_id\"] See Also -------- civis.io.read_civis : Read directly", "the gzip file is decompressed during download if compression ==", "errors to remove from the import before failing. If multiple", "to a Civis table \"\"\" client = client or APIClient()", "specified filename prefix for the output file to have. Default:", "client=client, poll_on_creation=False) download = _download_callback(script_id, run_id, filename, headers, compression) fut.add_done_callback(download)", "database itself requires a primary key. last_modified_keys: list[str], optional A", "cleaned file's Civis ID. Used for debugging. Raises ------ CivisImportError", "civis.io.civis_file_to_table : Upload a Civis file to a Civis table", "32 * 1024 log = logging.getLogger(__name__) __all__ = ['read_civis', 'read_civis_sql',", "If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise, return a list of", "API. 
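    # Headers fetched by the preliminary header query are prepended to the
    # downloaded file locally, so the export itself only needs to emit a
    # header row when that query returned no headers.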
include_header = True if headers is None else False", "------- data : :class:`pandas:pandas.DataFrame` or list A list of rows", "atomic unit in parallel, and should share the same columns", "``True`` (the default), this job will not appear in the", "optional Number of seconds to wait between checks for job", "is ``False``, otherwise a `pandas` `DataFrame`. Note that if `use_pandas`", "{'names': headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data = pd.read_csv(url, **_kwargs)", "``'none'``. ``'gzip'`` currently returns a file with no compression unless", "csv_settings=None): job_name = maybe_get_random_name(job_name) db_id = client.get_database_id(database) credential_id = credential_id", "already exists - skipping column ' 'detection'.format(table=table)) table_exists = True", "to pass to :func:`civis.APIClient.scripts.post_sql`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A", "the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The ID", "'file_id') def civis_file_to_table(file_id, database, table, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None,", "detected and provided ' 'compressions - provided compression was {}'", "outputs: raise EmptyResultError(\"Query {} returned no output.\" .format(script_id)) url =", "in manifest['entries']] >>> buf = BytesIO() >>> civis_to_file(ids[0], buf) >>>", "dict has the following keys: 'id': int File ID 'name':", "csv_settings or {} export_job = client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id, sql=sql, hidden=hidden,", "Parameters ---------- table_columns: List[Dict[str, str]] The columns for the table", "is None: client = APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter) assert delimiter,", ": :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples -------- >>> import pandas", "---------- df : :class:`pandas:pandas.DataFrame` The `DataFrame` to upload to Civis.", "regardless if there are more columns in the table. headers", "None: client = APIClient(api_key=api_key) if use_pandas and NO_PANDAS: raise ImportError(\"use_pandas", "Parameters ---------- filename : str Upload the contents of this", "directly to a CSV file. \"\"\" if client is None:", "can be concatenated so write headers as gzip if compression", "If the values detected on the file do not match", "string Remember that special characters (such as '.') can only", "info of the file as returned by the Civis API.", "outputs: warnings.warn(\"Job %s, run %s does not have any output", "len(table_columns) != len(file_columns): raise CivisImportError('All files should have the same", "scenarios this will greatly # reduce the work that Platform", "credential will be used. primary_keys: list[str], optional A list of", "or not the file contains errors. delimiter: str The provided", "from a Civis table. Parameters ---------- table : str Name", "the other values, use `df.reset_index()` instead of `df` as the", "column statistics may not be available for up to 24", "os import path import io import logging import os import", "will not appear in the Civis UI. Returns ------- unload_manifest:", "as returned by the Civis API. headers: bool The provided", "UI. **kwargs : kwargs Extra keyword arguments will be passed", "primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If", "Used for debugging. 
Raises ------ CivisImportError If the values detected", "hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False)", "Examples -------- >>> table = \"schema.table\" >>> database = \"my_data\"", "cleaning_futures = _run_cleaning(file_id, client, need_table_columns, headers, delimiter, hidden) (cleaned_file_ids, headers,", "\"my_database\") >>> fut.result() # Wait for job to complete See", "one of {}\" .format(DELIMITERS.keys())) # always set compression to gzip", "of strings. The ``schema`` may be None if the input", "= False except ImportError: NO_PANDAS = True CHUNK_SIZE = 32", "list of strings. Raises ------ ImportError If `use_pandas` is ``True``", "sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None, execution=\"immediate\", delimiter=None, polling_interval=None, archive=False,", ": :class:`pandas:pandas.DataFrame` or list A list of rows (with header", "table after a brief delay, in order to accommodate multiple", "-------- >>> sql = \"SELECT * FROM schema.table\" >>> df", "column statistics as part of the run. If \"delayed\", flag", "the contents of `filename` into memory. Examples -------- >>> with", "= 'CSV import to {}.{}'.format(schema, table) import_job = client.imports.post_files_csv( source,", "csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started run %d of SQL script", "chunk = response.raw.read(CHUNK_SIZE) d = zlib.decompressobj(zlib.MAX_WBITS | 32) while chunk", "[] done, still_going = concurrent.futures.wait( cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED ) # Set", "table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval, hidden=hidden)", "in a schema or table name if delimited by double-quotes.", "headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data = pd.read_csv(url, **_kwargs) else:", "name will change in v2.0.0. database : str or int", "decompress the stream elif compression == 'none': with open(local_path, 'wb')", "db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential # don't", "optional Maximum number of Megabytes each created file will be.", ": str The schema and table you want to upload", "a Civis table. All provided files will be loaded as", "those which # are still running. for result in concurrent.futures.as_completed(done", "job_name, credential_id, hidden=hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval,", "compression was {}' ' but detected compression {}. Please '", "seconds to wait between checks for job completion. 
archive :", "be differences in # their precisions/lengths - setting this option", "headers: bool The provided value for whether or not the", "os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs = {'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path,", "------ CivisImportError If multiple files are given and determined to", "= (None, schema_name_tup[0]) if len(schema_name_tup) != 2: raise ValueError(\"Cannot parse", "APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter) assert delimiter, \"delimiter must be one", "columns but file {} ' 'has {} columns'.format( len(table_columns), output_obj_id,", "file %s (%s)', outputs[0][\"output_name\"], file_id) if use_pandas: # allows users", "'\\t': 'tab', '|': 'pipe', } @deprecate_param('v2.0.0', 'api_key') def read_civis(table, database,", "a CSV file. civis.io.civis_file_to_table : Upload a Civis file to", "file_id) if use_pandas: # allows users to enter their own", "Examples -------- >>> import pandas as pd >>> df =", "= io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt = io.TextIOWrapper(buf, encoding='utf-8') txt.seek(0)", "\"upsert\", this field is required. escaped: bool, optional A boolean", "to a Civis table. All provided files will be loaded", "before failing. If multiple files are provided, this limit applies", "columns have different types, their delimiters are different, headers are", "file cleaning need_table_columns = ((not table_exists or existing_table_rows == 'drop')", "with open(tmp_path, 'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path,", "names parameter _kwargs = {'names': headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip'", "to get the table columns as needed # and perform", "run_id, filename), RuntimeWarning) return else: url = outputs[0][\"path\"] file_id =", "as exc: # NOQA log.debug(\"Failed to retrieve headers due to", ">>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter) >>> ids = [entry['id']", "running. for result in concurrent.futures.as_completed(done | still_going): output_file = client.jobs.list_runs_outputs(", "!= len(file_columns): raise CivisImportError('All files should have the same number", "tablenames with periods must be double quoted, e.g. ``'my_schema.\"my.table\"'``. database", "introduce a breaking change headers = b'' delimiter = DELIMITERS.get(delimiter)", "parts are accessible via both files endpoint IDs and presigned", "of the run. If \"delayed\", flag the table for a", "client.get_database_id(database) credential_id = credential_id or client.default_credential # Try to get", "= {'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _, name", "column to use as the distkey for the table. sortkey1", "Raises ------ CivisImportError If multiple files are given and determined", "results in a Civis file \"\"\" if archive: warnings.warn(\"`archive` is", "not match their expected attributes. \"\"\" if headers != detected_info['includeHeader']:", "A name to give the job. If omitted, a random", "'url': str Unsigned S3 URL ('s3://...') 'url_signed': str Signed S3", "polling_interval=polling_interval, client=client, poll_on_creation=False) download = _download_callback(script_id, run_id, filename, headers, compression)", "be available for up to 24 hours. In addition, if", "``tablename`` will always be filled. 
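    For example, an illustrative sketch of the documented behaviour (a bare
    table name yields a ``None`` schema, while double-quoted names keep
    their embedded periods):

    >>> split_schema_tablename("my_table")
    (None, 'my_table')
    >>> split_schema_tablename('my_schema."my.table"')
    ('my_schema', 'my.table')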
Raises ------ ValueError If the", "'compression.'.format( compression, detected_info['compression']) ) def _process_cleaning_results(cleaning_futures, client, headers, need_table_columns, delimiter):", "\"\"\" if len(table_columns) != len(file_columns): raise CivisImportError('All files should have", "diststyle=diststyle) # If multiple files are being imported, there might", "compression {}. Please ' 'ensure all imported files have the", "# Avoid mutating input arguments new_col = dict(col) if new_col.get('name')", "table_columns=None, delimiter=\",\", headers=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", credential_id=None, polling_interval=None, archive=False,", "execution=\"immediate\", delimiter=None, headers=None, credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload the contents of", "---------- detected_info: Dict[str, Any] The detected info of the file", "(list, tuple)): raise TypeError(\"columns must be a list, tuple or", "`hidden` instead.\", FutureWarning) if client is None: # Instantiate client", "compression != detected_info['compression']: raise CivisImportError('Mismatch between detected and provided '", "`header` parameter (which will be passed directly to :func:`~pandas.DataFrame.to_csv`) to", "escape quotes with a backslash. Defaults to false. execution: string,", "if need_table_columns else None) if headers is None: headers =", "a random job name will be used. api_key : DEPRECATED", "buf, client=client) txt = io.TextIOWrapper(buf, encoding='utf-8') txt.seek(0) unload_manifest = json.load(txt)", "If not given, the :envvar:`CIVIS_API_KEY` environment variable will be used.", "archive=False, hidden=True): \"\"\"Upload the contents of a local CSV file", "not outputs: warnings.warn(\"Job %s, run %s does not have any", "log.debug('Uploaded file %s to Civis file %s', filename, file_id) fut", "Returns ------- data : :class:`pandas:pandas.DataFrame` or list A list of", "= civis.io.dataframe_to_civis(df, 'my-database', ... 'scratch.df_table') >>> fut.result() See Also --------", "= dict(schema=schema, table=table, remote_host_id=db_id, credential_id=cred_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey,", "= dict(col) if new_col.get('name') is None: new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col)", "{}\".format(select, table) return sql def _get_headers(client, sql, database, credential_id, polling_interval=None):", "remote_host_id=db_id, credential_id=cred_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle)", "S3 UNLOAD statements except the CSV parts are accessible via", "table you want to upload to. E.g., ``'scratch.table'``. Schemas or", "file should be treated as headers. 
The default, ``None``, attempts", "last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'api_key') def", "primary_keys=None, last_modified_keys=None, execution=\"immediate\", delimiter=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Upload a", "filename, file_id) fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows,", "col_a = df[\"column_a\"] See Also -------- civis.io.read_civis_sql : Read directly", "filename_prefix=None, force_multifile=False) script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id,", "will not be included. To store the index along with", "(job_id, run_id, filename), RuntimeWarning) return else: url = outputs[0][\"path\"] file_id", "``False``, no parsing of types is performed and each row", "compression='gzip') script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id, csv_settings=csv_settings,", "diststyle : str, optional The distribution style for the table.", "sql=sql, hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started run %d of", "polling_interval=None): \"\"\"Export data from Civis to a local CSV file.", "to conventional S3 UNLOAD statements except the CSV parts are", "don't fix bug that would cause breaking change for now", "'export_to_civis_file'] DELIMITERS = { ',': 'comma', '\\t': 'tab', '|': 'pipe',", "if d.unused_data: to_decompress = d.unused_data + chunk d = zlib.decompressobj(zlib.MAX_WBITS", "\"immediate\", refresh column statistics as part of the run. If", "enter their own names parameter _kwargs = {'names': headers} _kwargs.update(kwargs)", "credential_id=credential_id, sql=sql, hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started run %d", "\"\"\"Check a single round of cleaning results as compared to", "compression, delimiter, table_columns def _check_column_types(table_columns, file_columns, output_obj_id): \"\"\"Check that base", "from io import StringIO except ImportError: from cStringIO import StringIO", "polling_interval=None, hidden=True): \"\"\"Upload the contents of one or more Civis", "case if their columns have different types, their delimiters are", "two sets, it is possible # That done contains more", "fut = civis.io.civis_file_to_table(file_id, ... 'my-database', ... 'scratch.my_data') >>> fut.result() \"\"\"", "for the table. One of ``'even'``, ``'all'`` or ``'key'``. distkey", "import logging import os import shutil from tempfile import TemporaryDirectory", "callback def split_schema_tablename(table): \"\"\"Split a Redshift 'schema.tablename' string Remember that", "The file ID under consideration; used for error messaging. Raises", "the CSV parts are accessible via both files endpoint IDs", "it is necessary to account # for these possible completed", "from this database. Can be the database name or ID.", "all files combined. existing_table_rows : str, optional The behaviour if", "than one Future. Thus it is necessary to account #", "accessible via both files endpoint IDs and presigned S3 urls.", "= ['read_civis', 'read_civis_sql', 'civis_to_csv', 'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis', 'civis_file_to_table', 'split_schema_tablename', 'export_to_civis_file']", "One of ``','``, ``'\\t'``, or ``'|'``. Default: ``','``. 
unquoted: bool,", "One of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. delimiter: str,", "run_id = _sql_script(client=client, sql=sql, database=database, job_name=job_name, credential_id=credential_id, csv_settings=csv_settings, hidden=hidden) fut", "optional Whether or not to quote fields. Default: ``False``. prefix:", "optional A name to give the job. If omitted, a", "str The SQL select string to be executed. database :", "api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter='|', max_file_size=None, unquoted=False, prefix=None, polling_interval=None,", "'.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED) def _download_callback(job_id, run_id, filename, headers, compression):", "Schemas or tablenames with periods must be double quoted, e.g.", "would cause breaking change for now # when gzip compression", "to. E.g., ``'scratch.table'``. Schemas or tablenames with periods must be", "UI. csv_settings : dict, optional A dictionary of csv_settings to", "directly to a CSV file. civis.io.civis_file_to_table : Upload a Civis", "for row in data] Notes ----- This reads the data", "log.debug('Exported results to Civis file %s (%s)', outputs[0][\"output_name\"], file_id) if", "``'|'``. max_file_size: int, optional Maximum number of Megabytes each created", ": bool, optional If ``True`` (the default), this job will", ">>> sql = \"SELECT * FROM schema.table\" >>> df =", "with errors to remove from the import before failing. existing_table_rows", "before failing. existing_table_rows : str, optional The behaviour if a", "there are more columns in the table. delimiter : string,", "is None: client = APIClient() schema, table = split_schema_tablename(table) if", "fout.write(headers) with open(local_path, 'ab') as fout: shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) #", "job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data", "random job name will be used. client : :class:`civis.APIClient`, optional", "\"my_database\", use_pandas=True) >>> col_a = df[\"column_a\"] >>> data = read_civis_sql(sql,", "---------- filename : str Upload the contents of this file.", "client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if need_table_columns else None) if headers", "an AWS manifest file. Has the following keys: 'query': str", "'schema.table'?\" .format(table)) return tuple(schema_name_tup) def _replace_null_column_names(column_list): \"\"\"Replace null names in", "autodetect whether or not the first row contains headers. This", "data] Notes ----- This reads the data into memory. See", "file %s to Civis file %s', filename, file_id) fut =", "!= fcol_base_type: error_msgs.append( 'Column {}: File base type was {},", "as the sortkey for the table. sortkey2 : str, optional", "arguments are passed into :func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or", "not delimiter: raise ValueError(\"delimiter must be one of {}\" .format(DELIMITERS.keys()))", "csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id,", "Avoid mutating input arguments new_col = dict(col) if new_col.get('name') is", "Civis Table. 
credential_id : str or int, optional The ID", "an argument delimiter = DELIMITERS.get(delimiter) assert delimiter, \"delimiter must be", "credential_id or client.default_credential # don't fix bug that would cause", "query and return presigned urls. This function is intended for", "files endpoint IDs and presigned S3 urls. Parameters ---------- sql", "memory. civis.io.read_civis_sql : Read results of a SQL query into", "table name combined with a \".\", or else a single", ": :class:`~civis.futures.CivisFuture` A future which returns the response from :func:`civis.APIClient.scripts.get_sql_runs`", "= credential_id or client.default_credential if delimiter is not None: #", "This reads the contents of `filename` into memory. Examples --------", "or a list of Civis file IDs. Reference by name", "a (deprecated) api_key client = APIClient(api_key=api_key) sql = _get_sql_select(table, columns)", "as compared to provided values. Parameters ---------- detected_info: Dict[str, Any]", "the columns in the source file. Each dictionary should have", "for job to complete See Also -------- civis.io.read_civis : Read", "first row contains headers. primary_keys: list[str], optional A list of", "if isinstance(file_id, int): file_id = [file_id] if schema is None:", "this job will not appear in the Civis UI. **kwargs", "follow the pattern 'schema.table'?\" .format(table)) return tuple(schema_name_tup) def _replace_null_column_names(column_list): \"\"\"Replace", "pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) >>> fut", "there are more columns in the table. primary_keys: list[str], optional", "conventional S3 UNLOAD statements except the CSV parts are accessible", "``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns a", "source = dict(file_ids=cleaned_file_ids) destination = dict(schema=schema, table=table, remote_host_id=db_id, credential_id=cred_id, primary_keys=primary_keys,", "None else \"*\" sql = \"select {} from {}\".format(select, table)", "A `CivisFuture` object. Examples -------- >>> sql = \"SELECT *", "list of results from :func:`python:csv.reader`. job_name : str, optional A", "specified in the headers # then response.iter_content will decompress the", "file_columns, output_file.object_id) _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) if need_table_columns:", "for - # Since concurrent.futures.wait returns two sets, it is", "or passed into :func:`python:csv.reader` if `use_pandas` is ``False``. Returns -------", "execution: string, optional, default \"immediate\" One of \"delayed\" or \"immediate\".", "``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise, return a list of results", "= split_schema_tablename(table) file_id = file_to_civis(tmp_path, name, client=client) delimiter = ','", "= columns.index(\"column_a\") >>> col_a = [row[col_a_index] for row in data]", "`hidden` instead.\", FutureWarning) if client is None: client = APIClient(api_key=api_key)", "export # data at scale. headers = _get_headers(client, sql, db_id,", "data into this database. Can be the database name or", "which would introduce a breaking change headers = b'' delimiter", "Read results of a SQL query into memory. civis.io.export_to_civis_file :", "with open(filename, \"rb\") as data: file_id = file_to_civis(data, name, client=client)", "a schema or table name if delimited by double-quotes. Parameters", ":class:`~civis.futures.CivisFuture` A `CivisFuture` object. 
Examples -------- >>> sql = \"SELECT", "columns detected by the Civis API for the file. output_obj_id:", "Any] The detected info of the file as returned by", "differ in count. \"\"\" if len(table_columns) != len(file_columns): raise CivisImportError('All", "Parameters ---------- file_id : int or list[int] Civis file ID", "to ensure we can't change state sql = 'begin read", "first row contains headers. credential_id : str or int, optional", "others, or compressions do not match. Examples -------- >>> file_id", "_get_sql_select(table, columns=None): if columns and not isinstance(columns, (list, tuple)): raise", "``'gzip'`` compressed file will be returned for all cases. delimiter:", "fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey,", "a local CSV file to Civis. Parameters ---------- filename :", "returned dictionary containing a list of column names. Default: ``True``.", "list of str The columns from the query. 'entries': list", "use `df.reset_index()` instead of `df` as the first argument to", "',': 'comma', '\\t': 'tab', '|': 'pipe', } @deprecate_param('v2.0.0', 'api_key') def", "if `use_pandas` is ``True`` or passed into :func:`python:csv.reader` if `use_pandas`", "Also -------- civis.io.read_civis_sql : Read directly into memory using SQL.", "Default: ``'|'``. max_file_size: int, optional Maximum number of Megabytes each", "str, optional A user specified filename prefix for the output", "civis.io.civis_to_csv : Write directly to csv. civis.io.export_to_civis_file : Store a", "credential ID. If ``None``, the default credential will be used.", "= _run_cleaning(file_id, client, need_table_columns, headers, delimiter, hidden) (cleaned_file_ids, headers, compression,", "or ``'|'``. headers : bool, optional Whether or not the", "_process_cleaning_results( cleaning_futures, client, headers, need_table_columns, delimiter ) table_columns = table_columns", "Redshift 'schema.tablename' string Remember that special characters (such as '.')", "files either ' 'have a header or do not.') if", "primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", delimiter=None, headers=None, credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload", "credential_id, polling_interval=None): headers = None try: # use 'begin read", "A future which returns the response from :func:`civis.APIClient.scripts.get_sql_runs` after the", "client = APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter) assert delimiter, \"delimiter must", "may not be available for up to 24 hours. In", "only a table name, but the ``tablename`` will always be", "len(schema_name_tup) == 1: schema_name_tup = (None, schema_name_tup[0]) if len(schema_name_tup) !=", "-------- civis.APIClient.scripts.post_sql \"\"\" if client is None: client = APIClient(api_key=api_key)", ": :class:`~civis.futures.CivisFuture` A `CivisFuture` object. 
Examples -------- >>> sql =", "Store a SQL query's results in a Civis file \"\"\"", "cleaning_futures, client, headers, need_table_columns, delimiter ) table_columns = table_columns or", "parameter has no effect in versions >= 1.11 and will", "hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started run %d of SQL", "runs while waiting on those which # are still running.", "credential_id = credential_id or client.default_credential # Try to get headers", "CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) download = _download_callback(script_id, run_id,", "results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Raises ------ CivisImportError If", "in the API. include_header = True if headers is None", "== 'zip': arcname = arcname.split('.')[0] + '.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED)", "primary key column(s) of the destination table that uniquely identify", "appear in the Civis UI. Returns ------- unload_manifest: dict A", "has no effect in versions >= 1.11 and will be", "Signed S3 URL ('https://...') 'unquoted': bool Whether the cells are", "object. Notes ----- This reads the contents of `filename` into", "------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Raises ------ CivisImportError", "false. execution: string, optional, default \"immediate\" One of \"delayed\" or", "results from :func:`python:csv.reader`. job_name : str, optional A name to", "in the Civis UI. Returns ------- results : :class:`~civis.futures.CivisFuture` A", "sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution,", "---------- table: str Either a Redshift schema and table name", ".format(DELIMITERS.keys())) # always set compression to gzip to reduce I/O", "credential. If ``None``, the default credential will be used. primary_keys:", "in some but not others, or compressions do not match.", "file as returned by the Civis API. headers: bool The", "delimiter: str The provided value for the file delimiter. compression:", "polling_interval=None): headers = None try: # use 'begin read only;'", "hidden : bool, optional If ``True`` (the default), this job", "the Civis UI. Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture`", "sql = \"SELECT * FROM schema.my_big_table\" >>> database = \"my_database\"", "exists. One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults", ") try: client.get_table_id(table, database) log.debug('Table {table} already exists - skipping", "but the ``tablename`` will always be filled. Raises ------ ValueError", "client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object", "= \"SELECT * FROM schema.table\" >>> fut = civis_to_csv(\"file.csv\", sql,", "seconds to wait between checks for job completion. hidden :", "being imported, there might be differences in # their precisions/lengths", "be created from the :envvar:`CIVIS_API_KEY`. 
max_errors : int, optional The", "log.debug('Exported results to Civis file %s', file_id) return _download_file(url, filename,", "gzip to reduce I/O csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted,", "content-encoding is specified in the headers # then response.iter_content will", "' 'detection'.format(table=table)) table_exists = True except ValueError: table_exists = False", "length-related import failure loosen_types = len(file_id) > 1 import_name =", "case users provide a (deprecated) api_key client = APIClient(api_key=api_key) sql", "from :func:`python:csv.reader`. job_name : str, optional A name to give", "poll_on_creation=False) outputs = fut.result()[\"output\"] if not outputs: raise EmptyResultError(\"Unload query", "compression, detected_info['compression']) ) def _process_cleaning_results(cleaning_futures, client, headers, need_table_columns, delimiter): cleaned_file_ids", "for error messaging. Raises ------ CivisImportError If the table columns", "shutil from tempfile import TemporaryDirectory import warnings import zlib import", "of the file as returned by the Civis API. headers:", "script_id, run_id = _sql_script(client, sql, database, job_name, credential_id, hidden, csv_settings=csv_settings)", "------- fut : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples -------- >>>", "ensure we can't change state sql = 'begin read only;", "however, our use of content-encoding is inconsistent chunk = response.raw.read(CHUNK_SIZE)", "``'\\t'``, or ``'|'``. Default: ``','``. unquoted: bool, optional Whether or", "* FROM schema.table\" >>> fut = civis_to_csv(\"file.csv\", sql, \"my_database\") >>>", "is True but pandas is not installed.\") if archive: warnings.warn(\"`archive`", "credential_id=None, primary_keys=None, last_modified_keys=None, execution=\"immediate\", delimiter=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Upload", "or ``'|'``. If not provided, will attempt to auto-detect. headers", "= len(file_id) > 1 import_name = 'CSV import to {}.{}'.format(schema,", "the file as returned by the Civis API. headers: bool", "compression = detected_info['compression'] _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) #", "contains headers. primary_keys: list[str], optional A list of the primary", "messaging. Raises ------ CivisImportError If the table columns and the", "len(file_columns)) ) error_msgs = [] for idx, (tcol, fcol) in", "\"rb\") as data: file_id = file_to_civis(data, name, client=client) log.debug('Uploaded file", "credential_id = credential_id or client.default_credential csv_settings = csv_settings or {}", "the first row contains headers. credential_id : str or int,", "used. include_header: bool, optional If ``True``, the first line of", ":class:`pandas:pandas.DataFrame`. Otherwise, return a list of results from :func:`python:csv.reader`. job_name", "following keys: 'id': int File ID 'name': str Filename 'size':", "= path.join(tmp_dir, 'civis_to_csv.csv') with open(tmp_path, 'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response,", "compressed file will be returned for all cases. delimiter: str,", "be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int,", "of {}\" .format(DELIMITERS.keys())) # always set compression to gzip to", "(e.g. 
INT, VARCHAR, DECIMAl) rather than that # they have", "cleaned_table_columns) = _process_cleaning_results( cleaning_futures, client, headers, need_table_columns, delimiter ) table_columns", "df[\"column_a\"] See Also -------- civis.io.read_civis_sql : Read directly into memory", "Otherwise, return a list of results from :func:`python:csv.reader`. job_name :", "it uses a 'PARALLEL ON' S3 unload. It returns a", "columns in the table. primary_keys: list[str], optional A list of", "tmp_dir: tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs = {'encoding': 'utf-8', 'index':", "import zipfile from civis import APIClient from civis._utils import maybe_get_random_name", "csv_settings = csv_settings or {} export_job = client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id,", "error messaging. Raises ------ CivisImportError If the table columns and", "a 'PARALLEL ON' S3 unload. It returns a similar manifest", "import json import concurrent.futures import csv from os import path", "response from :func:`civis.APIClient.scripts.get_sql_runs` after the sql query has completed and", "to the same destination table. delimiter : string, optional The", "need_table_columns = ((not table_exists or existing_table_rows == 'drop') and table_columns", "civis.io.read_civis : Read directly into memory without SQL. civis.io.civis_to_csv :", "the ``tablename`` will always be filled. Raises ------ ValueError If", "table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden)", "headers. credential_id : str or int, optional The ID of", "cases. delimiter: str, optional Which delimiter to use, if any.", "table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None,", "# Try to get headers separately. In most scenarios this", "exported. use_pandas : bool, optional If ``True``, return a :class:`pandas:pandas.DataFrame`.", "None: # i.e. it was provided as an argument delimiter", "compression: str, optional Type of compression to use, if any.", "= response.raw.read(CHUNK_SIZE) def _download_file(url, local_path, headers, compression): response = requests.get(url,", "= 'gzip' data = pd.read_csv(url, **_kwargs) else: response = requests.get(url,", "files are being imported, there might be differences in #", "be one of {}\".format( DELIMITERS.keys() ) try: client.get_table_id(table, database) log.debug('Table", "from os import path import io import logging import os", "path.basename(filename) with open(filename, \"rb\") as data: file_id = file_to_civis(data, name,", "diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", delimiter=None,", "try: client.get_table_id(table, database) log.debug('Table {table} already exists - skipping column", "client.get_database_id(database) credential_id = credential_id or client.default_credential csv_settings = csv_settings or", "file {} ' 'has {} columns'.format( len(table_columns), output_obj_id, len(file_columns)) )", "* FROM schema.my_big_table\" >>> database = \"my_database\" >>> delimiter =", "omitted, all columns are exported. 
use_pandas : bool, optional If", "be the case if their columns have different types, their", "filename : str Download exported data into this file. sql", "be written with column names read from the DataFrame. Use", "to the columns in the source file. Each dictionary should", "response.raise_for_status() # gzipped buffers can be concatenated so write headers", "file. sql : str The SQL select string to be", "of \"delayed\" or \"immediate\". If \"immediate\", refresh column statistics as", "int Export data from this database. Can be the database", "dataframe_to_civis(df, database, table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None,", "to upload to. E.g., ``'scratch.table'``. api_key : DEPRECATED str, optional", "list of Civis file IDs. Reference by name to this", "``True`` include a key in the returned dictionary containing a", "job will not appear in the Civis UI. csv_settings :", "existing_table_rows : str, optional The behaviour if a table with", "is \"upsert\", this field is required. Note that this is", "last_modified_keys=None, execution=\"immediate\", delimiter=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Upload a `pandas`", "row contains headers. credential_id : str or int, optional The", "values for the data types provided, and decreases the #", "the table columns and the file columns have a type", "if error_msgs: raise CivisImportError( 'Encountered the following errors for file", "the database credential. If ``None``, the default credential will be", "@deprecate_param('v2.0.0', 'api_key') def read_civis_sql(sql, database, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None,", "@deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename, sql, database, job_name=None, api_key=None, client=None, credential_id=None,", "Exception as exc: # NOQA log.debug(\"Failed to retrieve headers due", "------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Notes ----- This", "TemporaryDirectory import warnings import zlib import gzip import zipfile from", "tcol_base_type ) ) if error_msgs: raise CivisImportError( 'Encountered the following", "there are more columns in the table. headers : bool,", "return cleaning_futures def _check_all_detected_info(detected_info, headers, delimiter, compression, output_file_id): \"\"\"Check a", "query into memory. civis.io.export_to_civis_file : Store a SQL query's results", "or int, optional The ID of the database credential. If", "# don't support parallel unload; the output format # is", "format # is different which would introduce a breaking change", "file is decompressed during download if compression == 'gzip' and", "created from the :envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum", "the same columns in the same order, and be in", "\".join(columns) if columns is not None else \"*\" sql =", "\"delayed\", flag the table for a deferred statistics update; column", "results as compared to provided values. Parameters ---------- detected_info: Dict[str,", "{}. Please ' 'ensure all imported files have the same", "how distributed databases export # data at scale. headers =", "run_id = _sql_script(client, sql, db_id, job_name, credential_id, csv_settings=csv_settings, hidden=hidden) fut", "fut.result() outputs = client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if not outputs: raise EmptyResultError(\"Query", "Type of compression to use, if any. One of ``'none'``,", "Your Civis API key. 
If not given, the :envvar:`CIVIS_API_KEY` environment", "the database. E.g. ``'my_schema.my_table'``. Schemas or tablenames with periods must", "detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers, column_delimiter=delimiter, hidden=hidden ) cleaning_futures.append(run_job(cleaner_job.id, client=client, polling_interval=polling_interval)) return", "txt.seek(0) unload_manifest = json.load(txt) return unload_manifest @deprecate_param('v2.0.0', 'api_key', 'headers') def", "\"SELECT * FROM schema.table\" >>> df = read_civis_sql(sql, \"my_database\", use_pandas=True)", ": :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Raises ------ CivisImportError If multiple", "return fut @deprecate_param('v2.0.0', 'file_id') def civis_file_to_table(file_id, database, table, client=None, max_errors=None,", "be in the same format. Parameters ---------- file_id : int", "change state sql = 'begin read only; select * from", "key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will be", "hidden=True): \"\"\"Unload the result of SQL query and return presigned", "true regardless of whether the destination database itself requires a", "def _replace_null_column_names(column_list): \"\"\"Replace null names in columns from file cleaning", "imports to the same destination table. polling_interval : int or", "See Also -------- :func:`~pandas.DataFrame.to_csv` \"\"\" if client is None: client", ">>> data = read_civis(table, database, columns=columns) >>> columns = data.pop(0)", "allows users to enter their own names parameter _kwargs =", "with no compression unless include_header is set to False. In", ": bool, optional Whether or not the first row of", "controlled such that changes in state cannot occur (e.g., INSERT,", "headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut", "of the destination table that uniquely identify a record. If", "= dict(include_header=include_header, compression='gzip') script_id, run_id = _sql_script(client, sql, db_id, job_name,", "optional A list of the primary key column(s) of the", "' 'ensure all imported files have the same ' 'compression.'.format(", "compared to this one. If inconsistencies are detected, raise an", "requests.get(url, stream=True) response.raise_for_status() # gzipped buffers can be concatenated so", "civis_to_csv(\"file.csv\", sql, \"my_database\") >>> fut.result() # Wait for job to", "included. To store the index along with the other values,", "fut @deprecate_param('v2.0.0', 'api_key') def civis_to_multifile_csv(sql, database, job_name=None, api_key=None, client=None, credential_id=None,", "might be differences in # their precisions/lengths - setting this", "Try to get headers separately. In most scenarios this will", "be None if the input is only a table name,", "not the source file(s) escape quotes with a backslash. Defaults", "compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) # Ensure that all results from files", "string will be executed twice; once to attempt to retrieve", "a record has been updated. If existing_table_rows is \"upsert\", this", "to. E.g., ``'scratch.table'``. 
api_key : DEPRECATED str, optional Your Civis", "SQL is controlled such that changes in state cannot occur", "if their columns have different types, their delimiters are different,", "= dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If multiple files are", "compression, output_file_id): \"\"\"Check a single round of cleaning results as", "stream=True) response.raise_for_status() with StringIO() as buf: if headers: buf.write(','.join(headers) +", "import io import logging import os import shutil from tempfile", "the output file to have. Default: ``None``. polling_interval : int", "2: raise ValueError(\"Cannot parse schema and table. \" \"Does '{}'", "compound sortkey for the table. table_columns : list[Dict[str, str]], optional", "import os import shutil from tempfile import TemporaryDirectory import warnings", "provided, this limit applies across all files combined. existing_table_rows :", "File ID 'name': str Filename 'size': int File size in", "_process_cleaning_results(cleaning_futures, client, headers, need_table_columns, delimiter): cleaned_file_ids = [] done, still_going", "values. Parameters ---------- detected_info: Dict[str, Any] The detected info of", "DELIMITERS = { ',': 'comma', '\\t': 'tab', '|': 'pipe', }", "``True`` and `pandas` is not installed. Examples -------- >>> sql", "on the file do not match their expected attributes. \"\"\"", "the DataFrame. Use the `header` parameter (which will be passed", "or ID. columns : list, optional A list of column", "'api_key') def read_civis_sql(sql, database, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None,", "dict Each dict has the following keys: 'id': int File", "was {}, but expected {}'.format( idx, fcol_base_type, tcol_base_type ) )", "The default, ``None``, attempts to autodetect whether or not the", "if there are more columns in the table. headers :", "due to how distributed databases export # data at scale.", "the file should be treated as headers. The default, ``None``,", "from {}\".format(select, table) return sql def _get_headers(client, sql, database, credential_id,", "will allow the Civis API # to increase these values", "their delimiters are different, headers are present in some but", "= CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) outputs = fut.result()[\"output\"]", "``'|'``. Default: ``','``. unquoted: bool, optional Whether or not to", "headers. This parameter has no effect in versions >= 1.11", "primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", credential_id=None, polling_interval=None, archive=False, hidden=True): \"\"\"Upload the", "= file_to_civis(tmp_path, name, client=client) delimiter = ',' fut = civis_file_to_table(file_id,", "differences in # their precisions/lengths - setting this option will", "present in some but not others, or compressions do not", "Read directly into memory using SQL. civis.io.civis_to_csv : Write directly", "a more consistent approach # if content-encoding is specified in", "hidden=False, csv_settings=None): job_name = maybe_get_random_name(job_name) db_id = client.get_database_id(database) credential_id =", "parse schema and table. \" \"Does '{}' follow the pattern", "with errors to remove from the import before failing. If", "A `CivisFuture` object. 
Examples -------- >>> import pandas as pd", "True if headers is None else False csv_settings = dict(include_header=include_header,", "as an atomic unit in parallel, and should share the", "table with the requested name already exists. One of ``'fail'``,", "the first row contains headers. This parameter has no effect", "not None: # i.e. it was provided as an argument", "only; select * from ({}) limit 1'.format(sql) fut = query_civis(sql,", "style for the table. One of ``'even'``, ``'all'`` or ``'key'``.", "raise ValueError(\"Provide a schema as part of the `table` input.\")", "[row[col_a_index] for row in data] >>> df = read_civis(\"schema.table\", \"my_data\",", "headers=None, credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload the contents of one or", "table. Parameters ---------- table_columns: List[Dict[str, str]] The columns for the", "cannot occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- sql", "if delimiter is not None: # i.e. it was provided", "table you want to upload to. E.g., ``'scratch.table'``. api_key :", "keys for column \"name\" and \"sqlType\". The import will only", "2-tuple of strings. The ``schema`` may be None if the", "errors to remove from the import before failing. existing_table_rows :", "v2.0. Tables will always be written with column names read", "DataFrame. Use the `header` parameter (which will be passed directly", "cause breaking change for now # when gzip compression is", "user specified filename prefix for the output file to have.", "in the Civis UI. csv_settings : dict, optional A dictionary", "to get headers separately. In most scenarios this will greatly", "returned by the Civis API. headers: bool The provided value", "the same destination table. polling_interval : int or float, optional", "cells. Examples -------- >>> sql = \"SELECT * FROM schema.my_big_table\"", "other values, use `df.reset_index()` instead of `df` as the first", "fut = export_to_civis_file(sql, \"my_database\") >>> file_id = fut.result()['output'][0][\"file_id\"] See Also", "table. The `DataFrame`'s index will not be included. To store", "{} ' 'has {} columns'.format( len(table_columns), output_obj_id, len(file_columns)) ) error_msgs", "columns'.format( len(table_columns), output_obj_id, len(file_columns)) ) error_msgs = [] for idx,", "return presigned urls. This function is intended for unloading large", "AWS manifest file. Has the following keys: 'query': str The", "with the other values, use `df.reset_index()` instead of `df` as", "defaults to True in the API. include_header = True if", "statistics may not be available for up to 24 hours.", "to be executed. database : str or int Export data", "The provided value for the file compression. output_file_id: int The", "* from ({}) limit 1'.format(sql) fut = query_civis(sql, database, client=client,", "if headers != detected_info['includeHeader']: raise CivisImportError('Mismatch between detected headers -", "the table. table_columns : list[Dict[str, str]], optional A list of", "from civis.base import EmptyResultError, CivisImportError from civis.futures import CivisFuture from", "if need_table_columns: table_columns = _replace_null_column_names(table_columns) return cleaned_file_ids, headers, compression, delimiter,", "(script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) if archive: def f(x): return", "a local CSV file. 
The custom SQL string will be", "diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None, execution=\"immediate\",", "delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return", "client.default_credential # Try to get headers separately. In most scenarios", "len(table_columns), output_obj_id, len(file_columns)) ) error_msgs = [] for idx, (tcol,", "Read data from this database. Can be the database name", "a `pandas` `DataFrame` into a Civis table. The `DataFrame`'s index", "!= detected_info['includeHeader']: raise CivisImportError('Mismatch between detected headers - ' 'please", "# use response.raw for a more consistent approach # if", "1)[0] fcol_base_type = fcol['sql_type'].split('(', 1)[0] if tcol_base_type != fcol_base_type: error_msgs.append(", "headers, delimiter, compression, output_file_id): \"\"\"Check a single round of cleaning", "int or list[int] Civis file ID or a list of", "headers, and zip the file elif compression == 'zip': with", "to remove from the import before failing. existing_table_rows : str,", "the first row contains headers. primary_keys: list[str], optional A list", "indicating a record has been updated. If existing_table_rows is \"upsert\",", "def export_to_civis_file(sql, database, job_name=None, client=None, credential_id=None, polling_interval=None, hidden=True, csv_settings=None): \"\"\"Store", "keyword arguments will be passed to :meth:`pandas:pandas.DataFrame.to_csv`. Returns ------- fut", "TemporaryDirectory() as tmp_dir: tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs = {'encoding':", "files combined. existing_table_rows : str, optional The behaviour if a", "credential_id, hidden=False, csv_settings=None): job_name = maybe_get_random_name(job_name) db_id = client.get_database_id(database) credential_id", "# allows users to enter their own names parameter _kwargs", "detected_info['compression'] _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) # Ensure that", "= 32 * 1024 log = logging.getLogger(__name__) __all__ = ['read_civis',", "bool, optional If ``True``, the first line of the CSV", "e.g. ``'my_schema.\"my.table\"'``. database : str or int Read data from", "If existing_table_rows is \"upsert\", this field is required. Note that", "data. This is done to use a more performant method", "primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'api_key')", "this is true regardless of whether the destination database itself", "fut.result() See Also -------- :func:`~pandas.DataFrame.to_csv` \"\"\" if client is None:", "Default: ``False``. prefix: str, optional A user specified filename prefix", "sql = \"SELECT * FROM schema.table\" >>> fut = export_to_civis_file(sql,", "----- This reads the data into memory. See Also --------", "'civis_to_csv.csv') with open(tmp_path, 'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file) with", "to attempt to retrieve headers and once to retrieve the", "size in bytes 'url': str Unsigned S3 URL ('s3://...') 'url_signed':", "autodetect whether or not the first row contains headers. 
primary_keys:", "a length-related import failure loosen_types = len(file_id) > 1 import_name", "None: new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col) return new_cols def _run_cleaning(file_ids, client,", "should share the same columns in the same order, and", "ID 'name': str Filename 'size': int File size in bytes", "contains headers. This parameter has no effect in versions >=", "\"\"\" client = client or APIClient() script_id, run_id = _sql_script(client=client,", "large queries/tables from redshift as it uses a 'PARALLEL ON'", "= APIClient(api_key=api_key) sql = _get_sql_select(table, columns) data = read_civis_sql(sql=sql, database=database,", "work that Platform does to provide a single output file", "be removed in v2.0. Tables will always be written with", "CivisImportError from civis.futures import CivisFuture from civis.io import civis_to_file, file_to_civis,", "if client is None: client = APIClient(api_key=api_key) if archive: warnings.warn(\"`archive`", "@deprecate_param('v2.0.0', 'file_id') def civis_file_to_table(file_id, database, table, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None,", "wait between checks for job completion. hidden : bool, optional", "Examples -------- >>> sql = \"SELECT * FROM schema.my_big_table\" >>>", "outputs: raise EmptyResultError(\"Unload query {} returned no manifest.\" .format(script_id)) buf", "fut.result()['output'][0][\"file_id\"] See Also -------- civis.io.read_civis : Read directly into memory", "file_id) fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle,", "been updated. If existing_table_rows is \"upsert\", this field is required.", "headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) # Ensure that all results", "statements except the CSV parts are accessible via both files", "possible completed cleaning runs while waiting on those which #", "directly into memory without SQL. civis.io.civis_to_csv : Write directly to", "See Also -------- civis.io.read_civis : Read table contents into memory.", "deferred statistics update; column statistics may not be available for", "stored as a Civis file. Examples -------- >>> sql =", "of a SQL query into memory. civis.io.civis_to_csv : Write directly", "If ``True`` include a key in the returned dictionary containing", "compression used. 'delimiter': str Delimiter that separates the cells. Examples", "file. Examples -------- >>> sql = \"SELECT * FROM schema.table\"", "the file elif compression == 'zip': with TemporaryDirectory() as tmp_dir:", "to wait between checks for query completion. archive : bool,", ">>> fut = civis_to_csv(\"file.csv\", sql, \"my_database\") >>> fut.result() # Wait", "credential_id=None, include_header=True, compression='none', delimiter='|', max_file_size=None, unquoted=False, prefix=None, polling_interval=None, hidden=True): \"\"\"Unload", "See Also -------- civis.APIClient.scripts.post_sql \"\"\" if client is None: client", "= table_columns or cleaned_table_columns source = dict(file_ids=cleaned_file_ids) destination = dict(schema=schema,", "gzip.open(local_path, 'wb') as fout: fout.write(headers) with open(local_path, 'ab') as fout:", "outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported results to Civis file %s',", "that separates the cells. 
Examples -------- >>> sql = \"SELECT", "_kwargs = {'names': headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data =", "arguments new_col = dict(col) if new_col.get('name') is None: new_col['name'] =", "schema.my_big_table\" >>> database = \"my_database\" >>> delimiter = \"|\" >>>", "database, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs):", "instead.\", FutureWarning) headers = False if kwargs.get('header') is False else", "single round of cleaning results as compared to provided values.", "# if content-encoding is specified in the headers # then", ":func:`~pandas.DataFrame.to_csv` \"\"\" if client is None: client = APIClient(api_key=api_key) if", "database credential ID. If ``None``, the default credential will be", "loosen_types = len(file_id) > 1 import_name = 'CSV import to", ": :class:`pandas:pandas.DataFrame` The `DataFrame` to upload to Civis. database :", "or client.default_credential # Try to get headers separately. In most", "v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) db_id = client.get_database_id(database) credential_id", "database, table, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None,", "optional If not provided, an :class:`civis.APIClient` object will be created", "from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The", "This is done to use a more performant method for", "True in the API. include_header = True if headers is", "The columns from the query. 'entries': list of dict Each", "table name. Returns ------- schema, tablename A 2-tuple of strings.", "If omitted, a random job name will be used. api_key", "not the file contains errors. delimiter: str The provided value", "table_columns: List[Dict[str, str]] The columns for the table to be", ") error_msgs = [] for idx, (tcol, fcol) in enumerate(zip(table_columns,", "db_id, credential_id, polling_interval) # include_header defaults to True in the", "a `pandas` `DataFrame`. Note that if `use_pandas` is ``False``, no", "Use Preprocess endpoint to get the table columns as needed", "i.e. it was provided as an argument delimiter = DELIMITERS.get(delimiter)", "output_file_id: int The cleaned file's Civis ID. Used for debugging.", "str or int, optional The ID of the database credential.", "fut.add_done_callback(download) if archive: def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) return", "A list of the columns indicating a record has been", "escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename,", "DELIMITERS.keys() ) try: client.get_table_id(table, database) log.debug('Table {table} already exists -", "This function is intended for unloading large queries/tables from redshift", "file's Civis ID. Used for debugging. Raises ------ CivisImportError If", "due to %s\", str(exc)) return headers def _decompress_stream(response, buf, write_bytes=True):", "cleaned_table_columns source = dict(file_ids=cleaned_file_ids) destination = dict(schema=schema, table=table, remote_host_id=db_id, credential_id=cred_id,", "is None: headers = detected_info['includeHeader'] if delimiter is None: delimiter", "base column types match those current defined for the table.", "instead the gzip file is decompressed during download if compression", "for up to 24 hours. 
In addition, if existing_table_rows is", "may be the case if their columns have different types,", "with periods must be double quoted, e.g. ``'scratch.\"my.table\"'``. api_key :", "data from this database. Can be the database name or", "will be returned for all cases. delimiter: str, optional Which", "``','``, ``'\\\\t'`` or ``'|'``. headers : bool, optional Whether or", "database credential. If ``None``, the default credential will be used.", "of csv_settings to pass to :func:`civis.APIClient.scripts.post_sql`. Returns ------- fut :", "as _input: ... _input.write('a,b,c\\\\n1,2,3') >>> fut = civis.io.csv_to_civis('input_file.csv', ... 'my-database',", "be concatenated so write headers as gzip if compression ==", "database. Can be the database name or ID. table :", "first row contains headers. This parameter has no effect in", "in case users provide a (deprecated) api_key client = APIClient(api_key=api_key)", "'please ensure all imported files either ' 'have a header", "= logging.getLogger(__name__) __all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv', 'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis',", "str, optional The column to use as the sortkey for", "Civis file %s', file_id) return _download_file(url, filename, headers, compression) return", "on those which # are still running. for result in", "= pd.read_csv(url, **_kwargs) else: response = requests.get(url, stream=True) response.raise_for_status() with", "will be created from the :envvar:`CIVIS_API_KEY`. credential_id : str or", "col_a = [row[col_a_index] for row in data] >>> df =", "deprecated and will be removed in v2.0.0. \" \"Use `hidden`", "of one or more Civis files to a Civis table.", "client=client) log.debug('Uploaded file %s to Civis file %s', filename, file_id)", "or not to quote fields. Default: ``False``. polling_interval : int", "table. table_columns : list[Dict[str, str]], optional A list of dictionaries", "= _replace_null_column_names(table_columns) return cleaned_file_ids, headers, compression, delimiter, table_columns def _check_column_types(table_columns,", "if need_table_columns: file_columns = detected_info['tableColumns'] _check_column_types(table_columns, file_columns, output_file.object_id) _check_all_detected_info(detected_info, headers,", "if not outputs: raise EmptyResultError(\"Query {} returned no output.\" .format(script_id))", "characters (such as '.') can only be included in a", "with StringIO() as buf: if headers: buf.write(','.join(headers) + '\\n') _decompress_stream(response,", "civis.base import EmptyResultError, CivisImportError from civis.futures import CivisFuture from civis.io", "if client is None: client = APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter)", "'api_key') def csv_to_civis(filename, database, table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None,", "delimiter=',', unquoted=False, archive=False, hidden=True, polling_interval=None): \"\"\"Export data from Civis to", "increase these values for the data types provided, and decreases", "schema and table. \" \"Does '{}' follow the pattern 'schema.table'?\"", "bool The provided value for whether or not the file", "appear in the Civis UI. **kwargs : kwargs Extra keyword", "more columns in the table. primary_keys: list[str], optional A list", "client is None: client = APIClient() schema, table = split_schema_tablename(table)", "checks for query completion. 
hidden : bool, optional If ``True``", "database, client=client, credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns'] except Exception as", "have the same ' 'compression.'.format( compression, detected_info['compression']) ) def _process_cleaning_results(cleaning_futures,", "if archive: warnings.warn(\"`archive` is deprecated and will be removed in", "fcol['sql_type'].split('(', 1)[0] if tcol_base_type != fcol_base_type: error_msgs.append( 'Column {}: File", "# include_header defaults to True in the API. include_header =", "contents of a local CSV file to Civis. Parameters ----------", "to this one. If inconsistencies are detected, raise an error.", ") ) if error_msgs: raise CivisImportError( 'Encountered the following errors", "Parameters ---------- detected_info: Dict[str, Any] The detected info of the", "if any. One of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``.", "names. Default: ``True``. compression: str, optional Type of compression to", "' 'have a header or do not.') if delimiter !=", "up to 24 hours. In addition, if existing_table_rows is \"upsert\",", "sql def _get_headers(client, sql, database, credential_id, polling_interval=None): headers = None", "or compressions do not match. Examples -------- >>> file_id =", "database) log.debug('Table {table} already exists - skipping column ' 'detection'.format(table=table))", "to gzip to reduce I/O csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter,", "bool Whether the cells are quoted. 'compression': str Type of", "be executed. database : str or int Execute the query", "_sql_script(client, sql, database, job_name, credential_id, hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs,", "from files are correctly accounted for - # Since concurrent.futures.wait", "result.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns = detected_info['tableColumns']", "DELIMITERS.get(delimiter) if not delimiter: raise ValueError(\"delimiter must be one of", "removed in v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) if client", "single output file # with headers prepended to it due", "pass to :func:`civis.APIClient.scripts.post_sql`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A future", "If omitted, a random job name will be used. client", "or ``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns a file with", "else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def _download_file(url, local_path, headers, compression):", "executed twice; once to attempt to retrieve headers and once", "the table. sortkey1 : str, optional The column to use", "Whether or not to quote fields. Default: ``False``. polling_interval :", "**to_csv_kwargs) _, name = split_schema_tablename(table) file_id = file_to_civis(tmp_path, name, client=client)", "done, still_going = concurrent.futures.wait( cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED ) # Set values", "credential_id=cred_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) #", "'size': int File size in bytes 'url': str Unsigned S3", "be used. 
primary_keys: list[str], optional A list of the primary", "fut @deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename, database, table, api_key=None, client=None, max_errors=None,", "FROM schema.table\" >>> df = read_civis_sql(sql, \"my_database\", use_pandas=True) >>> col_a", "parallel unload; the output format # is different which would", "The second column in a compound sortkey for the table.", "not match. Examples -------- >>> file_id = 100 >>> fut", "= [\"column_a\", \"ROW_NUMBER() OVER(ORDER BY date) AS order\"] >>> data", "future.succeeded(): return outputs = future.result().get(\"output\") if not outputs: warnings.warn(\"Job %s,", "a :class:`pandas:pandas.DataFrame`. Otherwise, return a list of results from :func:`python:csv.reader`.", "of seconds to wait between checks for job completion. hidden", "the # risk of a length-related import failure loosen_types =", "run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) download = _download_callback(script_id, run_id, filename, headers,", "The column to use as the sortkey for the table.", "if existing_table_rows is \"upsert\", delayed executions move data from staging", "= os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs = {'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs)", "response.iter_content will decompress the stream # however, our use of", "filename, headers, compression) return callback def split_schema_tablename(table): \"\"\"Split a Redshift", "used. 'delimiter': str Delimiter that separates the cells. Examples --------", "execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename, database,", "str or int Read data from this database. Can be", "concatenated so write headers as gzip if compression == 'gzip':", "in data] Notes ----- This reads the data into memory.", "'columns. Expected {} columns but file {} ' 'has {}", "file_id = file_to_civis(data, name, client=client) log.debug('Uploaded file %s to Civis", "bytes 'url': str Unsigned S3 URL ('s3://...') 'url_signed': str Signed", "be double quoted, e.g. ``'my_schema.\"my.table\"'``. database : str or int", "ImportError(\"use_pandas is True but pandas is not installed.\") if archive:", "contents of `filename` into memory. Examples -------- >>> with open('input_file.csv',", "delimiter = DELIMITERS.get(delimiter) if not delimiter: raise ValueError(\"delimiter must be", "str The schema and table you want to upload to.", "remote_host_id=db_id, credential_id=credential_id, sql=sql, hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started run", "blank column name. Parameters ---------- column_list: list[dict] the list of", "waiting on those which # are still running. for result", "bug that would cause breaking change for now # when", "with TemporaryDirectory() as tmp_dir: tmp_path = path.join(tmp_dir, 'civis_to_csv.csv') with open(tmp_path,", "manifest.\" .format(script_id)) buf = io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt =", "split_schema_tablename(table) file_id = file_to_civis(tmp_path, name, client=client) delimiter = ',' fut", "here in case users provide a (deprecated) api_key client =", "not appear in the Civis UI. **kwargs : kwargs Extra", ": bool, optional (deprecated) If ``True``, archive the import job", "delimiter: str, optional Which delimiter to use, if any. 
One", "def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) return fut @deprecate_param('v2.0.0', 'api_key')", "for the file. output_obj_id: int The file ID under consideration;", "%d', run_job.id, export_job.id) return export_job.id, run_job.id def _get_sql_select(table, columns=None): if", "# write headers and decompress the stream elif compression ==", "must be one of {}\".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter,", "to this argument is deprecated, as the name will change", "unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id = _sql_script(client, sql, db_id, job_name,", "io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt = io.TextIOWrapper(buf, encoding='utf-8') txt.seek(0) unload_manifest", "and will be removed in v2.0. Tables will always be", "job_name = maybe_get_random_name(job_name) db_id = client.get_database_id(database) credential_id = credential_id or", "give the job. If omitted, a random job name will", ">>> df = pd.read_csv(buf, delimiter=delimiter) See Also -------- civis.APIClient.scripts.post_sql \"\"\"", "detected, raise an error. first_completed = done.pop() output_file = client.jobs.list_runs_outputs(", "**kwargs) return data def export_to_civis_file(sql, database, job_name=None, client=None, credential_id=None, polling_interval=None,", "else True with TemporaryDirectory() as tmp_dir: tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv')", "fut @deprecate_param('v2.0.0', 'file_id') def civis_file_to_table(file_id, database, table, client=None, max_errors=None, existing_table_rows=\"fail\",", "hidden=True): \"\"\"Upload the contents of a local CSV file to", "sql, db_id, job_name, credential_id, hidden=hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id,", "or else a single table name. Returns ------- schema, tablename", "for job completion. archive : bool, optional (deprecated) If ``True``,", "error. first_completed = done.pop() output_file = client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id )[0]", "polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data from Civis using a", ":class:`~civis.futures.CivisFuture` A `CivisFuture` object. Notes ----- This reads the contents", "int Read data from this database. Can be the database", "endpoint to get the table columns as needed # and", "------ CivisImportError If the table columns and the file columns", "_replace_null_column_names(column_list): \"\"\"Replace null names in columns from file cleaning with", "installed.\") if archive: warnings.warn(\"`archive` is deprecated and will be removed", "StringIO try: import pandas as pd NO_PANDAS = False except", "\"delimiter must be one of {}\".format( DELIMITERS.keys() ) try: client.get_table_id(table,", "str The provided value for the file delimiter. compression: str", ": Read results of a SQL query into memory. civis.io.civis_to_csv", "delay, in order to accommodate multiple concurrent imports to the", "5, 6]}) >>> fut = civis.io.dataframe_to_civis(df, 'my-database', ... 'scratch.df_table') >>>", "sortkey for the table. table_columns : list[Dict[str, str]], optional A", "the Civis UI. **kwargs : kwargs Extra keyword arguments are", "given and determined to be incompatible for import. 
This may", "may be None if the input is only a table", "indicating whether or not the source file(s) escape quotes with", "of seconds to wait between checks for query completion. archive", "used for error messaging. Raises ------ CivisImportError If the table", "for debugging. Raises ------ CivisImportError If the values detected on", "In a future release, a ``'gzip'`` compressed file will be", "else: response = requests.get(url, stream=True) response.raise_for_status() with StringIO() as buf:", "or client.default_credential # don't fix bug that would cause breaking", "sql, \"my_database\") >>> fut.result() # Wait for job to complete", "of dict Each dict has the following keys: 'id': int", "Civis UI. **kwargs : kwargs Extra keyword arguments are passed", "of columns from file cleaning. Returns -------- column_list: list[dict] \"\"\"", "the file do not match their expected attributes. \"\"\" if", "filename, headers, compression) fut.add_done_callback(download) if archive: def f(x): return client.scripts.put_sql_archive(script_id,", "columns.index(\"column_a\") >>> col_a = [row[col_a_index] for row in data] >>>", "is None: raise ValueError(\"Provide a schema as part of the", "# when gzip compression is requested, a gzip file is", "`table` input.\") db_id = client.get_database_id(database) cred_id = credential_id or client.default_credential", "{}: File base type was {}, but expected {}'.format( idx,", "default), this job will not appear in the Civis UI.", "If ``True``, archive the import job as soon as it", "Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples --------", "the pattern 'schema.table'?\" .format(table)) return tuple(schema_name_tup) def _replace_null_column_names(column_list): \"\"\"Replace null", "a single output file # with headers prepended to it", "else: to_decompress = d.unconsumed_tail + chunk if write_bytes: buf.write(d.decompress(to_decompress)) else:", "data @deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename, sql, database, job_name=None, api_key=None, client=None,", "_sql_script(client, sql, db_id, job_name, credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs,", "# instead the gzip file is decompressed during download if", ">>> fut = civis.io.csv_to_civis('input_file.csv', ... 'my-database', ... 'scratch.my_data') >>> fut.result()", "buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def _download_file(url, local_path, headers,", "data = read_civis_sql(sql, \"my_database\") >>> columns = data.pop(0) >>> col_a_index", "column names in the Civis Table. credential_id : str or", "statistics update; column statistics may not be available for up", "of SQL script %d', run_job.id, export_job.id) return export_job.id, run_job.id def", "= read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id, polling_interval=polling_interval, archive=archive, hidden=hidden,", "names. Column SQL transformations are possible. If omitted, all columns", "Read results of a SQL query into memory. civis.io.civis_to_csv :", "run_job from civis._deprecation import deprecate_param import requests try: from io", "expected {}'.format( idx, fcol_base_type, tcol_base_type ) ) if error_msgs: raise", "a list of strings. 
Raises ------ ImportError If `use_pandas` is", "and decreases the # risk of a length-related import failure", "deprecated, as the name will change in v2.0.0. database :", "Read directly into memory without SQL. civis.io.read_civis_sql : Read results", "> 1 import_name = 'CSV import to {}.{}'.format(schema, table) import_job", "the types # share a base type (e.g. INT, VARCHAR,", "outputs = client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if not outputs: raise EmptyResultError(\"Query {}", "arcname.split('.')[0] + '.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED) def _download_callback(job_id, run_id, filename,", "io import logging import os import shutil from tempfile import", "or float, optional Number of seconds to wait between checks", "or ``'upsert'``. Defaults to ``'fail'``. diststyle : str, optional The", "%s', filename, file_id) fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors,", "determined to be incompatible for import. This may be the", "APIClient(api_key=api_key) if use_pandas and NO_PANDAS: raise ImportError(\"use_pandas is True but", "'drop') and table_columns is None) cleaning_futures = _run_cleaning(file_id, client, need_table_columns,", "| 32) else: to_decompress = d.unconsumed_tail + chunk if write_bytes:", "each created file will be. unquoted: bool, optional Whether or", "{}\".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size)", "concurrent.futures.wait( cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED ) # Set values from first completed", "diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, delimiter=\",\", headers=None, primary_keys=None, last_modified_keys=None, escaped=False,", "import failure loosen_types = len(file_id) > 1 import_name = 'CSV", "\"\"\"Replace null names in columns from file cleaning with an", "Civis UI. Returns ------- unload_manifest: dict A dictionary resembling an", "CSV file to Civis. Parameters ---------- filename : str Upload", "if write_bytes: buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def _download_file(url,", "Examples -------- >>> with open('input_file.csv', 'w') as _input: ... _input.write('a,b,c\\\\n1,2,3')", "omitted, a random job name will be used. client :", "job_name=job_name, credential_id=credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval,", "multiple files are being imported, there might be differences in", "of the custom SQL is controlled such that changes in", "**kwargs : kwargs Extra keyword arguments will be passed to", "Raises ------ ImportError If `use_pandas` is ``True`` and `pandas` is", "Civis. 
Parameters ---------- filename : str Upload the contents of", "client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None, last_modified_keys=None,", "= read_civis(\"schema.table\", \"my_data\", use_pandas=True) >>> col_a = df[\"column_a\"] See Also", "distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped,", "``','``, ``'\\\\t'`` or ``'|'``. If not provided, will attempt to", "delimiter != detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter \"{}\" does not match", "or not the source file(s) escape quotes with a backslash.", "manifest['entries']] >>> buf = BytesIO() >>> civis_to_file(ids[0], buf) >>> buf.seek(0)", "CSV file. civis.io.civis_file_to_table : Upload a Civis file to a", "(script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) download = _download_callback(script_id, run_id, filename,", "detected_info['columnDelimiter'] compression = detected_info['compression'] _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id)", "* 1024 log = logging.getLogger(__name__) __all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv',", "output_file.object_id) cleaned_file_ids.append(output_file.object_id) if need_table_columns: table_columns = _replace_null_column_names(table_columns) return cleaned_file_ids, headers,", "you want to upload to. E.g., ``'scratch.table'``. Schemas or tablenames", "this field is required. escaped: bool, optional A boolean value", "_sql_script(client, sql, db_id, job_name, credential_id, hidden=hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs,", "default credential will be used. include_header: bool, optional If ``True``", "if a table with the requested name already exists. One", "a CSV file. \"\"\" if client is None: client =", "str, optional Type of compression to use, if any. One", "sql, database, job_name, credential_id, hidden=False, csv_settings=None): job_name = maybe_get_random_name(job_name) db_id", "Tables will always be written with column names read from", "gzip compression is requested, a gzip file is not actually", "db_id, job_name, credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),", "include_header: bool, optional If ``True`` include a key in the", "= client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns = detected_info['tableColumns'] _check_column_types(table_columns, file_columns, output_file.object_id)", "to the same destination table. polling_interval : int or float,", "columns is not None else \"*\" sql = \"select {}", "headers and decompress the stream elif compression == 'none': with", "Download exported data into this file. sql : str The", "sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If multiple files are being imported,", ": str, optional A name to give the job. 
If", "credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=False, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0',", "client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns = detected_info['tableColumns'] _check_column_types(table_columns, file_columns, output_file.object_id) _check_all_detected_info(detected_info,", "The ID of the database credential. If ``None``, the default", "is decompressed during download if compression == 'gzip' and include_header:", "outputs = fut.result()[\"output\"] if not outputs: raise EmptyResultError(\"Unload query {}", "with TemporaryDirectory() as tmp_dir: tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs =", "for job completion. hidden : bool, optional If ``True`` (the", "column_delimiter=delimiter, compression=compression, escaped=escaped, execution=execution, loosen_types=loosen_types, table_columns=table_columns, redshift_destination_options=redshift_options, hidden=hidden ) fut", "= response.raw.read(CHUNK_SIZE) d = zlib.decompressobj(zlib.MAX_WBITS | 32) while chunk or", "= arcname.split('.')[0] + '.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED) def _download_callback(job_id, run_id,", "delimiter, table_columns def _check_column_types(table_columns, file_columns, output_obj_id): \"\"\"Check that base column", "that uniquely identify a record. If existing_table_rows is \"upsert\", this", ">>> buf.seek(0) >>> df = pd.read_csv(buf, delimiter=delimiter) See Also --------", "as the distkey for the table. sortkey1 : str, optional", "the same destination table. credential_id : str or int, optional", "Civis API for the file. output_obj_id: int The file ID", "-------- >>> sql = \"SELECT * FROM schema.table\" >>> fut", "**_kwargs) else: response = requests.get(url, stream=True) response.raise_for_status() with StringIO() as", "the file columns have a type mismatch, or differ in", "'url_signed': str Signed S3 URL ('https://...') 'unquoted': bool Whether the", "memory using SQL. civis.io.civis_to_csv : Write directly to csv. civis.io.export_to_civis_file", "if any. One of ``','``, ``'\\t'``, or ``'|'``. Default: ``','``.", "'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _, name = split_schema_tablename(table) file_id", "polling_interval=polling_interval) headers = fut.result()['result_columns'] except Exception as exc: # NOQA", "import csv from os import path import io import logging", "open(filename, \"rb\") as data: file_id = file_to_civis(data, name, client=client) log.debug('Uploaded", "to \" \"download. Not creating file %s.\" % (job_id, run_id,", "to Civis file %s (%s)', outputs[0][\"output_name\"], file_id) if use_pandas: #", "API # to increase these values for the data types", "cleaning with an appropriately blank column name. Parameters ---------- column_list:", "path.join(tmp_dir, 'civis_to_csv.csv') with open(tmp_path, 'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file)", "memory. Examples -------- >>> with open('input_file.csv', 'w') as _input: ...", "compression) return callback def split_schema_tablename(table): \"\"\"Split a Redshift 'schema.tablename' string", "import maybe_get_random_name from civis.base import EmptyResultError, CivisImportError from civis.futures import", "civis.io.read_civis : Read table contents into memory. civis.io.read_civis_sql : Read", "Civis API key. 
If not given, the :envvar:`CIVIS_API_KEY` environment variable", "from Civis using a custom SQL string. The custom SQL", "'compression': str Type of compression used. 'delimiter': str Delimiter that", "accommodate multiple concurrent imports to the same destination table. credential_id", "v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) if client is None:", "instead.\", FutureWarning) if client is None: # Instantiate client here", "files are given and determined to be incompatible for import.", "= CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) download = _download_callback(script_id,", "df[\"column_a\"] >>> data = read_civis_sql(sql, \"my_database\") >>> columns = data.pop(0)", "error_msgs: raise CivisImportError( 'Encountered the following errors for file {}:\\n\\t{}'.format(", "redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If multiple files", "order, and be in the same format. Parameters ---------- file_id", "NO_PANDAS = True CHUNK_SIZE = 32 * 1024 log =", "str, optional The distribution style for the table. One of", "csv_settings to pass to :func:`civis.APIClient.scripts.post_sql`. Returns ------- fut : :class:`~civis.futures.CivisFuture`", "need_table_columns, delimiter): cleaned_file_ids = [] done, still_going = concurrent.futures.wait( cleaning_futures,", "including schema, in the database. E.g. ``'my_schema.my_table'``. Schemas or tablenames", "not appear in the Civis UI. Returns ------- unload_manifest: dict", "available for up to 24 hours. In addition, if existing_table_rows", "buf, write_bytes=True): # use response.raw for a more consistent approach", "only copy these columns regardless if there are more columns", "occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- sql :", "= client.get_database_id(database) cred_id = credential_id or client.default_credential if delimiter is", "optional [DEPRECATED] Whether or not the first row of the", "@deprecate_param('v2.0.0', 'api_key') def read_civis(table, database, columns=None, use_pandas=False, job_name=None, api_key=None, client=None,", ": int, optional The maximum number of rows with errors", "query_civis(sql, database, client=client, credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns'] except Exception", "be used. polling_interval : int or float, optional Number of", "whether or not the file contains errors. delimiter: str The", "the run. If \"delayed\", flag the table for a deferred", "with headers prepended to it due to how distributed databases", "= client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if not outputs: raise EmptyResultError(\"Query {} returned", "our use of content-encoding is inconsistent chunk = response.raw.read(CHUNK_SIZE) d", "32) while chunk or d.unused_data: if d.unused_data: to_decompress = d.unused_data", "If multiple files are provided, this limit applies across all", "polling_interval=polling_interval, client=client, poll_on_creation=False) outputs = fut.result()[\"output\"] if not outputs: raise", "file_to_civis(tmp_path, name, client=client) delimiter = ',' fut = civis_file_to_table(file_id, database,", "100 >>> fut = civis.io.civis_file_to_table(file_id, ... 'my-database', ... 'scratch.my_data') >>>", "(e.g VARCHAR(42), DECIMAL(8, 10)) tcol_base_type = tcol['sql_type'].split('(', 1)[0] fcol_base_type =", "sortkey1 : str, optional The column to use as the", "separately. 
In most scenarios this will greatly # reduce the", "the index along with the other values, use `df.reset_index()` instead", "detected_info = client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns = detected_info['tableColumns'] _check_column_types(table_columns, file_columns,", "\"|\" >>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter) >>> ids =", "= done.pop() output_file = client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id )[0] detected_info =", "\"*\" sql = \"select {} from {}\".format(select, table) return sql", "between checks for query completion. hidden : bool, optional If", "for these possible completed cleaning runs while waiting on those", "io import StringIO except ImportError: from cStringIO import StringIO try:", "exported data into this file. sql : str The SQL", "'api_key', 'headers') def dataframe_to_civis(df, database, table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\",", "file cleaning. Returns -------- column_list: list[dict] \"\"\" new_cols = []", "if `use_pandas` is ``False``. Returns ------- data : :class:`pandas:pandas.DataFrame` or", "SQL. civis.io.civis_to_csv : Write directly to csv. civis.io.export_to_civis_file : Store", "archive : bool, optional (deprecated) If ``True``, archive the import", "column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id = _sql_script(client, sql, db_id,", "fut : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples -------- >>> import", "columns=None): if columns and not isinstance(columns, (list, tuple)): raise TypeError(\"columns", "result in concurrent.futures.as_completed(done | still_going): output_file = client.jobs.list_runs_outputs( result.job_id, result.run_id", "installed. Examples -------- >>> table = \"schema.table\" >>> database =", "read_civis(table, database, columns=None, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False,", "str Signed S3 URL ('https://...') 'unquoted': bool Whether the cells", "str or int Export data from this database. Can be", "'dataframe_to_civis.csv') to_csv_kwargs = {'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs)", "upload to. E.g., ``'scratch.table'``. api_key : DEPRECATED str, optional Your", "distkey=None, sortkey1=None, sortkey2=None, table_columns=None, delimiter=\",\", headers=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\",", "execution=\"immediate\", credential_id=None, polling_interval=None, archive=False, hidden=True): \"\"\"Upload the contents of a", "All provided files will be loaded as an atomic unit", "# Use Preprocess endpoint to get the table columns as", "table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None, execution=\"immediate\", delimiter=None, polling_interval=None, archive=False, hidden=True,", "provided value for whether or not the file contains errors.", "Platform does to provide a single output file # with", "with periods must be double quoted, e.g. ``'my_schema.\"my.table\"'``. database :", "if content-encoding is specified in the headers # then response.iter_content", "ID. Used for debugging. Raises ------ CivisImportError If the values", "'has {} columns'.format( len(table_columns), output_obj_id, len(file_columns)) ) error_msgs = []", ": string, optional The column delimiter. 
One of ``','``, ``'\\\\t'``", "False else True with TemporaryDirectory() as tmp_dir: tmp_path = os.path.join(tmp_dir,", "file. Each dictionary should have keys for column \"name\" and", "client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data from Civis", "database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns,", "to wait between checks for job completion. hidden : bool,", "= [] for idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)): #", "remove from the import before failing. If multiple files are", "the same ' 'compression.'.format( compression, detected_info['compression']) ) def _process_cleaning_results(cleaning_futures, client,", "job as soon as it completes. hidden : bool, optional", "include_header=True, compression='none', delimiter='|', max_file_size=None, unquoted=False, prefix=None, polling_interval=None, hidden=True): \"\"\"Unload the", "record has been updated. If existing_table_rows is \"upsert\", this field", "{}'.format( idx, fcol_base_type, tcol_base_type ) ) if error_msgs: raise CivisImportError(", "optional A list of the columns indicating a record has", "in concurrent.futures.as_completed(done | still_going): output_file = client.jobs.list_runs_outputs( result.job_id, result.run_id )[0]", "UPDATE, DELETE, etc.). Parameters ---------- filename : str Download exported", "first_completed = done.pop() output_file = client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id )[0] detected_info", "in enumerate(column_list): # Avoid mutating input arguments new_col = dict(col)", "optional (deprecated) If ``True``, archive the import job as soon", "to Civis file %s', filename, file_id) fut = civis_file_to_table(file_id, database,", "fout: fout.write(headers) _decompress_stream(response, fout) # decompress the stream, write headers,", "``False``. prefix: str, optional A user specified filename prefix for", "except the CSV parts are accessible via both files endpoint", "AS order\"] >>> data = read_civis(table, database, columns=columns) >>> columns", "= client.get_database_id(database) credential_id = credential_id or client.default_credential # don't fix", "_download_file(url, local_path, headers, compression): response = requests.get(url, stream=True) response.raise_for_status() #", "'zip': arcname = arcname.split('.')[0] + '.csv' fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED) def", "try: from io import StringIO except ImportError: from cStringIO import", "compression): def callback(future): if not future.succeeded(): return outputs = future.result().get(\"output\")", "{} returned no output.\" .format(script_id)) url = outputs[0][\"path\"] file_id =", "d = zlib.decompressobj(zlib.MAX_WBITS | 32) while chunk or d.unused_data: if", "client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data from a", "is False else True with TemporaryDirectory() as tmp_dir: tmp_path =", "Can be the database name or ID. use_pandas : bool,", "the stream elif compression == 'none': with open(local_path, 'wb') as", "' 'has {} columns'.format( len(table_columns), output_obj_id, len(file_columns)) ) error_msgs =", "be removed in v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) headers", "job completion. 
archive : bool, optional (deprecated) If ``True``, archive", "existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\",", "Read table contents into memory. civis.io.read_civis_sql : Read results of", "# Ensure that all results from files are correctly accounted", "A list of rows (with header as first row) if", "polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Read data from a Civis table.", "\"my_data\", use_pandas=True) >>> col_a = df[\"column_a\"] See Also -------- civis.io.read_civis_sql", "bool, optional Whether or not the first row of the", "in the table. delimiter : string, optional The column delimiter.", "`filename` into memory. Examples -------- >>> with open('input_file.csv', 'w') as", "= run_job(import_job.id, client=client, polling_interval=polling_interval) log.debug('Started run %d for import %d',", "``True``. compression: str, optional Type of compression to use, if", "I/O csv_settings = dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id,", "users to enter their own names parameter _kwargs = {'names':", "result.job_id, result.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info if need_table_columns: file_columns =", ">>> col_a = df[\"column_a\"] >>> data = read_civis_sql(sql, \"my_database\") >>>", "_download_file(url, filename, headers, compression) return callback def split_schema_tablename(table): \"\"\"Split a", "file_ids: cleaner_job = client.files.post_preprocess_csv( file_id=fid, in_place=False, detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers, column_delimiter=delimiter,", "def _download_file(url, local_path, headers, compression): response = requests.get(url, stream=True) response.raise_for_status()", "**kwargs): \"\"\"Upload a `pandas` `DataFrame` into a Civis table. The", "imports to the same destination table. credential_id : str or", "Can be the database name or ID. table : str", "or tablenames with periods must be double quoted, e.g. ``'scratch.\"my.table\"'``.", "can't change state sql = 'begin read only; select *", "(script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) outputs = fut.result()[\"output\"] if not", "table name, but the ``tablename`` will always be filled. Raises", "return new_cols def _run_cleaning(file_ids, client, need_table_columns, headers, delimiter, hidden, polling_interval=None):", "%s (%s)', outputs[0][\"output_name\"], file_id) if use_pandas: # allows users to", "the purposes of type checking, we care only that the", "change for now # when gzip compression is requested, a", "table \"\"\" client = client or APIClient() script_id, run_id =", "SQL string. The custom SQL string will be executed twice;", "between detected and provided ' 'compressions - provided compression was", "column name. 
Parameters ---------- column_list: list[dict] the list of columns", "is necessary to account # for these possible completed cleaning", "delayed executions move data from staging table to final table", "table_exists = False # Use Preprocess endpoint to get the", "so write headers as gzip if compression == 'gzip': with", "loosen_types=loosen_types, table_columns=table_columns, redshift_destination_options=redshift_options, hidden=hidden ) fut = run_job(import_job.id, client=client, polling_interval=polling_interval)", "will be # compared to this one. If inconsistencies are", "outputs = future.result().get(\"output\") if not outputs: warnings.warn(\"Job %s, run %s", "delimiter for {}: \"{}\"'.format( delimiter, output_file_id, detected_info[\"columnDelimiter\"]) ) if compression", "fout: shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) # write headers and decompress the", "# don't fix bug that would cause breaking change for", "and decompress the stream elif compression == 'none': with open(local_path,", "run %d for import %d', fut.run_id, import_job.id) return fut def", "column names. Default: ``True``. compression: str, optional Type of compression", "Raises ------ ValueError If the input ``table`` is not separable", "CSV file. \"\"\" if client is None: client = APIClient(api_key=api_key)", "compression == 'zip': with TemporaryDirectory() as tmp_dir: tmp_path = path.join(tmp_dir,", "a random job name will be used. client : :class:`civis.APIClient`,", "One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults to", "file # with headers prepended to it due to how", "fut = civis_to_csv(\"file.csv\", sql, \"my_database\") >>> fut.result() # Wait for", "idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)): # for the purposes", "list, tuple or None\") select = \", \".join(columns) if columns", "DECIMAl) rather than that # they have the same precision", "'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _, name = split_schema_tablename(table)", "to enter their own names parameter _kwargs = {'names': headers}", "concurrent.futures import csv from os import path import io import", "'my-database', ... 'scratch.df_table') >>> fut.result() See Also -------- :func:`~pandas.DataFrame.to_csv` \"\"\"", "civis_to_file(ids[0], buf) >>> buf.seek(0) >>> df = pd.read_csv(buf, delimiter=delimiter) See", "the same number of ' 'columns. Expected {} columns but", "instead.\", FutureWarning) db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential", "of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or ``'upsert'``. Defaults to ``'fail'``.", "a Civis table. Parameters ---------- table : str Name of", "\"name\" and \"sqlType\". The import will only copy these columns", "FutureWarning) db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential #", "quote fields. Default: ``False``. prefix: str, optional A user specified", "seconds to wait between checks for query completion. hidden :", "destination table that uniquely identify a record. 
If existing_table_rows is", "= client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if need_table_columns else None) if", "list A list of rows (with header as first row)", "The column to use as the distkey for the table.", "False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _, name = split_schema_tablename(table) file_id =", "return fut @deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename, database, table, api_key=None, client=None,", "= dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id, run_id", "move data from staging table to final table after a", "this function. Parameters ---------- df : :class:`pandas:pandas.DataFrame` The `DataFrame` to", "Default: ``','``. unquoted: bool, optional Whether or not to quote", "delimiter. One of ``','``, ``'\\\\t'`` or ``'|'``. If not provided,", "from civis.futures import CivisFuture from civis.io import civis_to_file, file_to_civis, query_civis", "use_pandas: # allows users to enter their own names parameter", "be loaded as an atomic unit in parallel, and should", "will be used. client : :class:`civis.APIClient`, optional If not provided,", "will be used. polling_interval : int or float, optional Number", "Has the following keys: 'query': str The query. 'header': list", "and perform necessary file cleaning need_table_columns = ((not table_exists or", "hidden=hidden ) cleaning_futures.append(run_job(cleaner_job.id, client=client, polling_interval=polling_interval)) return cleaning_futures def _check_all_detected_info(detected_info, headers,", "different types, their delimiters are different, headers are present in", "an error. first_completed = done.pop() output_file = client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id", "The provided value for whether or not the file contains", "autodetect whether or not the first row contains headers. credential_id", "max_file_size=max_file_size) script_id, run_id = _sql_script(client, sql, database, job_name, credential_id, hidden,", "object. Raises ------ CivisImportError If multiple files are given and", "not installed.\") if archive: warnings.warn(\"`archive` is deprecated and will be", "= civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1,", "if any. One of ``','``, ``'\\t'``, or ``'|'``. Default: ``'|'``.", "same format. Parameters ---------- file_id : int or list[int] Civis", "\"delimiter must be one of {}\".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression,", ": str, optional The second column in a compound sortkey", "be passed to :meth:`pandas:pandas.DataFrame.to_csv`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A", "# and perform necessary file cleaning need_table_columns = ((not table_exists", "breaking change headers = b'' delimiter = DELIMITERS.get(delimiter) if not", "the following keys: 'id': int File ID 'name': str Filename", "of `filename` into memory. Examples -------- >>> with open('input_file.csv', 'w')", "\"my_data\" >>> columns = [\"column_a\", \"ROW_NUMBER() OVER(ORDER BY date) AS", "a list of Civis file IDs. 
Reference by name to", "script %d', run_job.id, export_job.id) return export_job.id, run_job.id def _get_sql_select(table, columns=None):", "return callback def split_schema_tablename(table): \"\"\"Split a Redshift 'schema.tablename' string Remember", "of types is performed and each row will be a", "``table`` is not separable into a schema and table name.", "be the database name or ID. use_pandas : bool, optional", "the cells are quoted. 'compression': str Type of compression used.", "import job as soon as it completes. hidden : bool,", "Wait for job to complete See Also -------- civis.io.read_civis :", "Returns ------- schema, tablename A 2-tuple of strings. The ``schema``", "or ID. use_pandas : bool, optional If ``True``, return a", "headers, delimiter, hidden, polling_interval=None): cleaning_futures = [] for fid in", "files to a Civis table. All provided files will be", "future release, a ``'gzip'`` compressed file will be returned for", "open(tmp_path, 'wb') as tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path, 'w')", "last_modified_keys=None, escaped=False, execution=\"immediate\", delimiter=None, headers=None, credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload the", "If not provided, will attempt to auto-detect. headers : bool,", "Delimiter that separates the cells. Examples -------- >>> sql =", "run_id)[\"output\"] if not outputs: raise EmptyResultError(\"Query {} returned no output.\"", "api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None,", "be passed directly to :func:`~pandas.DataFrame.to_csv`) to modify the column names", "import shutil from tempfile import TemporaryDirectory import warnings import zlib", "read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id, polling_interval=polling_interval, archive=archive, hidden=hidden, **kwargs)", "\"\"\" reader = csv.reader(StringIO(str(table)), delimiter=\".\", doublequote=True, quotechar='\"') schema_name_tup = next(reader)", "If omitted, all columns are exported. use_pandas : bool, optional", "use_pandas and NO_PANDAS: raise ImportError(\"use_pandas is True but pandas is", "created. file_columns: List[Dict[str, str]] The columns detected by the Civis", "to modify the column names in the Civis Table. credential_id", "str Either a Redshift schema and table name combined with", "gzip file is not actually returned # instead the gzip", "run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) return fut @deprecate_param('v2.0.0', 'api_key') def read_civis_sql(sql,", "not None else \"*\" sql = \"select {} from {}\".format(select,", "run %s does not have any output to \" \"download.", "gzipped buffers can be concatenated so write headers as gzip", "UI. **kwargs : kwargs Extra keyword arguments are passed into", "polling_interval : int or float, optional Number of seconds to", "imported, there might be differences in # their precisions/lengths -", "\"SELECT * FROM schema.my_big_table\" >>> database = \"my_database\" >>> delimiter", "civis.io.civis_file_to_table(file_id, ... 'my-database', ... 'scratch.my_data') >>> fut.result() \"\"\" if client", "unless include_header is set to False. 
In a future release,", "not outputs: raise EmptyResultError(\"Unload query {} returned no manifest.\" .format(script_id))", "Number of seconds to wait between checks for job completion.", "`CivisFuture` object. Examples -------- >>> sql = \"SELECT * FROM", "the input ``table`` is not separable into a schema and", "ID. If ``None``, the default credential will be used. polling_interval", "that changes in state cannot occur (e.g., INSERT, UPDATE, DELETE,", "have keys for column \"name\" and \"sqlType\". The import will", "``'my_schema.my_table'``. Schemas or tablenames with periods must be double quoted,", "except ValueError: table_exists = False # Use Preprocess endpoint to", "delimiter = ',' fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors,", "value for whether or not the file contains errors. delimiter:", "BytesIO() >>> civis_to_file(ids[0], buf) >>> buf.seek(0) >>> df = pd.read_csv(buf,", "int, optional Maximum number of Megabytes each created file will", "incompatible for import. This may be the case if their", "logging import os import shutil from tempfile import TemporaryDirectory import", "be used. include_header: bool, optional If ``True`` include a key", "to quote fields. Default: ``False``. prefix: str, optional A user", "# with headers prepended to it due to how distributed", "of compression to use, if any. One of ``'none'``, ``'zip'``,", "these columns regardless if there are more columns in the", "Use the `header` parameter (which will be passed directly to", "delimiter is not None: # i.e. it was provided as", "def civis_to_multifile_csv(sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter='|',", "existing_table_rows is \"upsert\", this field is required. Note that this", "{} export_job = client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id, sql=sql, hidden=hidden, csv_settings=csv_settings) run_job", "sortkey1=None, sortkey2=None, table_columns=None, delimiter=\",\", headers=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", credential_id=None,", "# share a base type (e.g. INT, VARCHAR, DECIMAl) rather", "CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) if archive: def f(x):", "output_file_id, detected_info[\"columnDelimiter\"]) ) if compression != detected_info['compression']: raise CivisImportError('Mismatch between", "{}, but expected {}'.format( idx, fcol_base_type, tcol_base_type ) ) if", "Which delimiter to use, if any. One of ``','``, ``'\\t'``,", "Execute the query against this database. Can be the database", "S3 urls. Parameters ---------- sql : str The SQL select", "_decompress_stream(response, buf, write_bytes=False) buf.seek(0) data = list(csv.reader(buf, **kwargs)) return data", "Maximum number of Megabytes each created file will be. unquoted:", "are detected, raise an error. first_completed = done.pop() output_file =", "query's results in a Civis file \"\"\" if archive: warnings.warn(\"`archive`", "is specified in the headers # then response.iter_content will decompress", "columns from file cleaning. Returns -------- column_list: list[dict] \"\"\" new_cols", "1)[0] if tcol_base_type != fcol_base_type: error_msgs.append( 'Column {}: File base", "a primary key. last_modified_keys: list[str], optional A list of the", "name to give the job. 
If omitted, a random job", "poll_on_creation=False) return fut @deprecate_param('v2.0.0', 'api_key') def read_civis_sql(sql, database, use_pandas=False, job_name=None,", "[row[col_a_index] for row in data] Notes ----- This reads the", "and length # (e.g VARCHAR(42), DECIMAL(8, 10)) tcol_base_type = tcol['sql_type'].split('(',", "= requests.get(url, stream=True) response.raise_for_status() with StringIO() as buf: if headers:", "\".\", or else a single table name. Returns ------- schema,", "Also -------- civis.io.read_civis : Read table contents into memory. civis.io.read_civis_sql", "polling_interval=polling_interval)) return cleaning_futures def _check_all_detected_info(detected_info, headers, delimiter, compression, output_file_id): \"\"\"Check", "diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys,", "CSV file. The custom SQL string will be executed twice;", "prefix=None, polling_interval=None, hidden=True): \"\"\"Unload the result of SQL query and", "of seconds to wait between checks for job completion. archive", "to Civis. Parameters ---------- filename : str Upload the contents", "[entry['id'] for entry in manifest['entries']] >>> buf = BytesIO() >>>", "is different which would introduce a breaking change headers =", "= _process_cleaning_results( cleaning_futures, client, headers, need_table_columns, delimiter ) table_columns =", "removed in v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) db_id =", "data = pd.read_csv(url, **_kwargs) else: response = requests.get(url, stream=True) response.raise_for_status()", "columns.index(\"column_a\") >>> col_a = [row[col_a_index] for row in data] Notes", "Default: ``False``. polling_interval : int or float, optional Number of", "table_columns or cleaned_table_columns source = dict(file_ids=cleaned_file_ids) destination = dict(schema=schema, table=table,", "addition, if existing_table_rows is \"upsert\", delayed executions move data from", "last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'file_id') def", "column ' 'detection'.format(table=table)) table_exists = True except ValueError: table_exists =", "Remember that special characters (such as '.') can only be", "but detected compression {}. Please ' 'ensure all imported files", "contains more than one Future. Thus it is necessary to", "dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id, run_id =", "will be removed in v2.0. Tables will always be written", "table_columns=table_columns, redshift_destination_options=redshift_options, hidden=hidden ) fut = run_job(import_job.id, client=client, polling_interval=polling_interval) log.debug('Started", "or not to quote fields. Default: ``False``. prefix: str, optional", "# then response.iter_content will decompress the stream # however, our", "to csv. civis.io.export_to_civis_file : Store a SQL query's results in", "the CSV will be headers. Default: ``True``. 
compression: str, optional", "fut.add_done_callback(f) fut.result() outputs = client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if not outputs: raise", "a key in the returned dictionary containing a list of", "import %d', fut.run_id, import_job.id) return fut def _sql_script(client, sql, database,", "= _get_sql_select(table, columns) data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas, job_name=job_name, client=client,", "buffers can be concatenated so write headers as gzip if", "the default credential will be used. include_header: bool, optional If", "cleaning. Returns -------- column_list: list[dict] \"\"\" new_cols = [] for", "the job. If omitted, a random job name will be", "-------- >>> table = \"schema.table\" >>> database = \"my_data\" >>>", "this will greatly # reduce the work that Platform does", "headers = b'' delimiter = DELIMITERS.get(delimiter) if not delimiter: raise", "One of ``','``, ``'\\t'``, or ``'|'``. Default: ``'|'``. max_file_size: int,", "or list[int] Civis file ID or a list of Civis", "the work that Platform does to provide a single output", "is \"upsert\", this field is required. escaped: bool, optional A", "' 'compression.'.format( compression, detected_info['compression']) ) def _process_cleaning_results(cleaning_futures, client, headers, need_table_columns,", "max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id,", "database, credential_id, polling_interval=None): headers = None try: # use 'begin", "SQL query's results in a Civis file \"\"\" if archive:", "table_columns def _check_column_types(table_columns, file_columns, output_obj_id): \"\"\"Check that base column types", "created from the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional", "name will be used. client : :class:`civis.APIClient`, optional If not", "compression to gzip to reduce I/O csv_settings = dict(include_header=include_header, compression='gzip',", "... 'my-database', ... 'scratch.my_data') >>> fut.result() \"\"\" if client is", "to final table after a brief delay, in order to", "unquoted=False, prefix=None, polling_interval=None, hidden=True): \"\"\"Unload the result of SQL query", "greatly # reduce the work that Platform does to provide", "write_bytes=False) buf.seek(0) data = list(csv.reader(buf, **kwargs)) return data @deprecate_param('v2.0.0', 'api_key')", "file do not match their expected attributes. \"\"\" if headers", "in the Civis UI. Returns ------- unload_manifest: dict A dictionary", "A list of column names. Column SQL transformations are possible.", "client=client, polling_interval=polling_interval) log.debug('Started run %d for import %d', fut.run_id, import_job.id)", "file_id=fid, in_place=False, detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers, column_delimiter=delimiter, hidden=hidden ) cleaning_futures.append(run_job(cleaner_job.id, client=client,", "credential_id, hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client,", "list[int] Civis file ID or a list of Civis file", "in the source file. Each dictionary should have keys for", "this one. If inconsistencies are detected, raise an error. 
first_completed", "['read_civis', 'read_civis_sql', 'civis_to_csv', 'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis', 'civis_file_to_table', 'split_schema_tablename', 'export_to_civis_file'] DELIMITERS", "shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) # write headers and decompress the stream", "a SQL query into memory. civis.io.export_to_civis_file : Store a SQL", "Parameters ---------- table: str Either a Redshift schema and table", "chunk if write_bytes: buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE) def", "optional A boolean value indicating whether or not the source", "``'drop'``, or ``'upsert'``. Defaults to ``'fail'``. diststyle : str, optional", "and `pandas` is not installed. Examples -------- >>> table =", "passed directly to :func:`~pandas.DataFrame.to_csv`) to modify the column names in", "columns regardless if there are more columns in the table.", "= list(csv.reader(buf, **kwargs)) return data @deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename, sql,", "the first argument to this function. Parameters ---------- df :", "performed and each row will be a list of strings.", "quoted, e.g. ``'scratch.\"my.table\"'``. api_key : DEPRECATED str, optional Your Civis", "ID. job_name : str, optional A name to give the", "is only a table name, but the ``tablename`` will always", "zlib import gzip import zipfile from civis import APIClient from", "first row) if `use_pandas` is ``False``, otherwise a `pandas` `DataFrame`.", "values, use `df.reset_index()` instead of `df` as the first argument", "# decompress the stream, write headers, and zip the file", "is None: new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col) return new_cols def _run_cleaning(file_ids,", "A dictionary resembling an AWS manifest file. Has the following", "or ID. table : str The schema and table you", "database name or ID. columns : list, optional A list", "string, optional The column delimiter. One of ``','``, ``'\\\\t'`` or", "creating file %s.\" % (job_id, run_id, filename), RuntimeWarning) return else:", "key in the returned dictionary containing a list of column", "delimiter, hidden) (cleaned_file_ids, headers, compression, delimiter, cleaned_table_columns) = _process_cleaning_results( cleaning_futures,", "omitted, a random job name will be used. api_key :", "the stream # however, our use of content-encoding is inconsistent", "+ chunk if write_bytes: buf.write(d.decompress(to_decompress)) else: buf.write(d.decompress(to_decompress).decode('utf-8')) chunk = response.raw.read(CHUNK_SIZE)", "database. Can be the database name or ID. job_name :", "not appear in the Civis UI. csv_settings : dict, optional", "`DataFrame`. Note that if `use_pandas` is ``False``, no parsing of", "database name or ID. use_pandas : bool, optional If ``True``,", "# Wait for job to complete See Also -------- civis.io.read_civis", "to wait between checks for job completion. archive : bool,", "Upload data into this database. Can be the database name", "If \"delayed\", flag the table for a deferred statistics update;", "{table} already exists - skipping column ' 'detection'.format(table=table)) table_exists =", "try: # use 'begin read only;' to ensure we can't", "have any output to \" \"download. 
Not creating file %s.\"", "return data @deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename, sql, database, job_name=None, api_key=None,", "table : str Name of table, including schema, in the", ": int or float, optional Number of seconds to wait", "= client or APIClient() script_id, run_id = _sql_script(client=client, sql=sql, database=database,", "will be created from the :envvar:`CIVIS_API_KEY`. max_errors : int, optional", "written with column names read from the DataFrame. Use the", "be returned for all cases. delimiter: str, optional Which delimiter", "we can't change state sql = 'begin read only; select", "all imported files have the same ' 'compression.'.format( compression, detected_info['compression'])", "of ``','``, ``'\\t'``, or ``'|'``. Default: ``','``. unquoted: bool, optional", "d.unused_data: if d.unused_data: to_decompress = d.unused_data + chunk d =", "that this is true regardless of whether the destination database", "\"my_database\" >>> delimiter = \"|\" >>> manifest = civis_to_multifile_csv(sql, database,", "output_file = client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info table_columns", "columns=None, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False, hidden=True, **kwargs):", "table_columns = table_columns or cleaned_table_columns source = dict(file_ids=cleaned_file_ids) destination =", "\"sqlType\". The import will only copy these columns regardless if", "next(reader) if len(schema_name_tup) == 1: schema_name_tup = (None, schema_name_tup[0]) if", "the returned dictionary containing a list of column names. Default:", "- ' 'please ensure all imported files either ' 'have", "with a backslash. Defaults to false. execution: string, optional, default", "file_columns, output_obj_id): \"\"\"Check that base column types match those current", "data into this file. sql : str The SQL select", "or int Upload data into this database. Can be the", "a table name, but the ``tablename`` will always be filled.", "of type checking, we care only that the types #", "be double quoted, e.g. ``'scratch.\"my.table\"'``. api_key : DEPRECATED str, optional", "polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'api_key') def csv_to_civis(filename, database, table,", "appear in the Civis UI. csv_settings : dict, optional A", "content-encoding is inconsistent chunk = response.raw.read(CHUNK_SIZE) d = zlib.decompressobj(zlib.MAX_WBITS |", "'begin read only; select * from ({}) limit 1'.format(sql) fut", "those current defined for the table. Parameters ---------- table_columns: List[Dict[str,", "file IDs. Reference by name to this argument is deprecated,", "str, optional Your Civis API key. If not given, the", "import TemporaryDirectory import warnings import zlib import gzip import zipfile", ": :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient` object will", "a brief delay, in order to accommodate multiple concurrent imports", "``'key'``. distkey : str, optional The column to use as", "to retrieve the data. This is done to use a", "= client.get_database_id(database) credential_id = credential_id or client.default_credential csv_settings = csv_settings", "sql : str The SQL select string to be executed.", "'.') can only be included in a schema or table", "as the name will change in v2.0.0. 
database : str", "database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter=',', unquoted=False, archive=False,", "local CSV file to Civis. Parameters ---------- filename : str", "the file delimiter. compression: str The provided value for the", "**kwargs): \"\"\"Read data from Civis using a custom SQL string.", "client, need_table_columns, headers, delimiter, hidden, polling_interval=None): cleaning_futures = [] for", "= {'names': headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data = pd.read_csv(url,", "file cleaning with an appropriately blank column name. Parameters ----------", "a list of column names. Default: ``True``. compression: str, optional", "requested name already exists. One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``,", "headers, delimiter, hidden) (cleaned_file_ids, headers, compression, delimiter, cleaned_table_columns) = _process_cleaning_results(", "primary_keys: list[str], optional A list of the primary key column(s)", "does not have any output to \" \"download. Not creating", "flag the table for a deferred statistics update; column statistics", "str, optional The second column in a compound sortkey for", "compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id, run_id = _sql_script(client,", "SQL. civis.io.civis_to_csv : Write directly to a CSV file. \"\"\"", "to complete See Also -------- civis.io.read_civis : Read table contents", "= client.jobs.list_runs_outputs( first_completed.job_id, first_completed.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info table_columns =", "fcol_base_type = fcol['sql_type'].split('(', 1)[0] if tcol_base_type != fcol_base_type: error_msgs.append( 'Column", "str Type of compression used. 'delimiter': str Delimiter that separates", "NO_PANDAS = False except ImportError: NO_PANDAS = True CHUNK_SIZE =", "results from files are correctly accounted for - # Since", "APIClient from civis._utils import maybe_get_random_name from civis.base import EmptyResultError, CivisImportError", "Expected {} columns but file {} ' 'has {} columns'.format(", "Can be the database name or ID. job_name : str,", "= \"schema.table\" >>> database = \"my_data\" >>> columns = [\"column_a\",", "to be executed. database : str or int Execute the", "of strings. Raises ------ ImportError If `use_pandas` is ``True`` and", "',' fut = civis_file_to_table(file_id, database, table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle,", "list of rows (with header as first row) if `use_pandas`", "\" \"Use `hidden` instead.\", FutureWarning) db_id = client.get_database_id(database) credential_id =", "consideration; used for error messaging. Raises ------ CivisImportError If the", "unload_manifest: dict A dictionary resembling an AWS manifest file. Has", "Upload a Civis file to a Civis table \"\"\" client", "presigned S3 urls. Parameters ---------- sql : str The SQL", "rather than that # they have the same precision and", "are present in some but not others, or compressions do", "in v2.0.0. 
\" \"Use `hidden` instead.\", FutureWarning) if client is", "idx, fcol_base_type, tcol_base_type ) ) if error_msgs: raise CivisImportError( 'Encountered", "polling_interval=None, archive=False, hidden=True, **kwargs): \"\"\"Upload a `pandas` `DataFrame` into a", "buf: if headers: buf.write(','.join(headers) + '\\n') _decompress_stream(response, buf, write_bytes=False) buf.seek(0)", "for column \"name\" and \"sqlType\". The import will only copy", "{} columns but file {} ' 'has {} columns'.format( len(table_columns),", "memory. See Also -------- civis.io.read_civis : Read directly into memory", "file to Civis. Parameters ---------- filename : str Upload the", "database, table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None,", "client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) fut.result() outputs = client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if not", "if there are more columns in the table. primary_keys: list[str],", "that the types # share a base type (e.g. INT,", "provide a (deprecated) api_key client = APIClient(api_key=api_key) sql = _get_sql_select(table,", "'wb') as fout: fout.write(headers) _decompress_stream(response, fout) # decompress the stream,", "file \"\"\" if use_pandas and NO_PANDAS: raise ImportError(\"use_pandas is True", "results in a Civis file \"\"\" if use_pandas and NO_PANDAS:", "is None: client = APIClient(api_key=api_key) db_id = client.get_database_id(database) credential_id =", "include_header: compression = 'none' # don't support parallel unload; the", "= \"|\" >>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter) >>> ids", "= client.files.post_preprocess_csv( file_id=fid, in_place=False, detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers, column_delimiter=delimiter, hidden=hidden )", "headers = False if kwargs.get('header') is False else True with", "no parsing of types is performed and each row will", "pd.read_csv(buf, delimiter=delimiter) See Also -------- civis.APIClient.scripts.post_sql \"\"\" if client is", "File size in bytes 'url': str Unsigned S3 URL ('s3://...')", "columns indicating a record has been updated. If existing_table_rows is", "raise TypeError(\"columns must be a list, tuple or None\") select", "'civis_file_to_table', 'split_schema_tablename', 'export_to_civis_file'] DELIMITERS = { ',': 'comma', '\\t': 'tab',", "encoding='utf-8') txt.seek(0) unload_manifest = json.load(txt) return unload_manifest @deprecate_param('v2.0.0', 'api_key', 'headers')", "as first row) if `use_pandas` is ``False``, otherwise a `pandas`", "from :func:`civis.APIClient.scripts.get_sql_runs` after the sql query has completed and the", "to 24 hours. In addition, if existing_table_rows is \"upsert\", delayed", "job_name, credential_id, hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval,", "output to \" \"download. Not creating file %s.\" % (job_id,", "input ``table`` is not separable into a schema and table", "need_table_columns: table_columns = _replace_null_column_names(table_columns) return cleaned_file_ids, headers, compression, delimiter, table_columns", ": Write directly to a CSV file. \"\"\" if client", "and the file columns have a type mismatch, or differ", "delimiter = \"|\" >>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter) >>>", "prefix for the output file to have. 
Default: ``None``. polling_interval", "values from first completed file cleaning - other files will", "value for the file compression. output_file_id: int The cleaned file's", "'query': str The query. 'header': list of str The columns", "column names read from the DataFrame. Use the `header` parameter", "a \".\", or else a single table name. Returns -------", "script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id, csv_settings=csv_settings, hidden=hidden)", "columns = [\"column_a\", \"ROW_NUMBER() OVER(ORDER BY date) AS order\"] >>>", "E.g., ``'scratch.table'``. api_key : DEPRECATED str, optional Your Civis API", "or list A list of rows (with header as first", "database. Can be the database name or ID. columns :", "BY date) AS order\"] >>> data = read_civis(table, database, columns=columns)", "quotes escaped with a backslash. Defaults to false. execution: string,", "'header': list of str The columns from the query. 'entries':", "- setting this option will allow the Civis API #", "read_civis(table, database, columns=columns) >>> columns = data.pop(0) >>> col_a_index =", "polling_interval=None, hidden=True): \"\"\"Unload the result of SQL query and return", "the requested name already exists. One of ``'fail'``, ``'truncate'``, ``'append'``,", "will not appear in the Civis UI. Returns ------- results", "import zlib import gzip import zipfile from civis import APIClient", "name. Returns ------- schema, tablename A 2-tuple of strings. The", "- # Since concurrent.futures.wait returns two sets, it is possible", "the first row of the file should be treated as", "headers, need_table_columns, delimiter ) table_columns = table_columns or cleaned_table_columns source", "itself requires a primary key. last_modified_keys: list[str], optional A list", "retrieve the data. This is done to use a more", "are different, headers are present in some but not others,", "'api_key') def civis_to_csv(filename, sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True,", "f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) return fut @deprecate_param('v2.0.0', 'api_key') def", "or int Export data from this database. Can be the", "S3 URL ('s3://...') 'url_signed': str Signed S3 URL ('https://...') 'unquoted':", "default \"immediate\" One of \"delayed\" or \"immediate\". If \"immediate\", refresh", "`use_pandas` is ``False``, no parsing of types is performed and", "returns a file with no compression unless include_header is set", ">>> database = \"my_database\" >>> delimiter = \"|\" >>> manifest", "in parallel, and should share the same columns in the", "Instantiate client here in case users provide a (deprecated) api_key", "are possible. If omitted, all columns are exported. use_pandas :", "columns as needed # and perform necessary file cleaning need_table_columns", "civis.io.csv_to_civis('input_file.csv', ... 'my-database', ... 'scratch.my_data') >>> fut.result() \"\"\" if client", "if columns and not isinstance(columns, (list, tuple)): raise TypeError(\"columns must", "and not isinstance(columns, (list, tuple)): raise TypeError(\"columns must be a", "to Civis file %s', file_id) return _download_file(url, filename, headers, compression)", "should have the same number of ' 'columns. Expected {}", "read_civis(\"schema.table\", \"my_data\", use_pandas=True) >>> col_a = df[\"column_a\"] See Also --------", "be removed in v2.0.0. 
\" \"Use `hidden` instead.\", FutureWarning) name", "sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval,", "See Also -------- civis.io.read_civis : Read directly into memory without", "the custom SQL is controlled such that changes in state", "the Civis UI. csv_settings : dict, optional A dictionary of", "csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) download", "memory. civis.io.export_to_civis_file : Store a SQL query's results in a", "unquoted=unquoted, filename_prefix=prefix, force_multifile=True, max_file_size=max_file_size) script_id, run_id = _sql_script(client, sql, database,", "tablenames with periods must be double quoted, e.g. ``'scratch.\"my.table\"'``. api_key", "Returns ------- fut : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Examples --------", "multiple files are given and determined to be incompatible for", "table. polling_interval : int or float, optional Number of seconds", "in the table. primary_keys: list[str], optional A list of the", "or int, optional The database credential ID. If ``None``, the", "str Filename 'size': int File size in bytes 'url': str", "gzip file is decompressed during download if compression == 'gzip'", "the sortkey for the table. sortkey2 : str, optional The", "existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None,", "new_col.get('name') is None: new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col) return new_cols def", "file is not actually returned # instead the gzip file", "redshift as it uses a 'PARALLEL ON' S3 unload. It", "# Set values from first completed file cleaning - other", "sql = 'begin read only; select * from ({}) limit", "= [file_id] if schema is None: raise ValueError(\"Provide a schema", "more consistent approach # if content-encoding is specified in the", "distkey=None, sortkey1=None, sortkey2=None, table_columns=None, primary_keys=None, last_modified_keys=None, escaped=False, execution=\"immediate\", delimiter=None, headers=None,", "Name of table, including schema, in the database. E.g. ``'my_schema.my_table'``.", ": str or int, optional The database credential ID. If", "import pandas as pd NO_PANDAS = False except ImportError: NO_PANDAS", "return fut @deprecate_param('v2.0.0', 'api_key') def read_civis_sql(sql, database, use_pandas=False, job_name=None, api_key=None,", "'w') as _input: ... _input.write('a,b,c\\\\n1,2,3') >>> fut = civis.io.csv_to_civis('input_file.csv', ...", "unit in parallel, and should share the same columns in", "detected_info = client.files.get(output_file.object_id).detected_info table_columns = (detected_info['tableColumns'] if need_table_columns else None)", "The custom SQL string will be executed twice; once to", "upload to. E.g., ``'scratch.table'``. client : :class:`civis.APIClient`, optional If not", "variable will be used. client : :class:`civis.APIClient`, optional If not", "INSERT, UPDATE, DELETE, etc.). Parameters ---------- sql : str The", "1 import_name = 'CSV import to {}.{}'.format(schema, table) import_job =", "of seconds to wait between checks for query completion. hidden", "function. 
Parameters ---------- df : :class:`pandas:pandas.DataFrame` The `DataFrame` to upload", "The maximum number of rows with errors to remove from", "date) AS order\"] >>> data = read_civis(table, database, columns=columns) >>>", "hidden=hidden, **kwargs) return data def export_to_civis_file(sql, database, job_name=None, client=None, credential_id=None,", "delimiter = DELIMITERS.get(delimiter) assert delimiter, \"delimiter must be one of", "``'my_schema.\"my.table\"'``. database : str or int Read data from this", "statistics as part of the run. If \"delayed\", flag the", "'unquoted': bool Whether the cells are quoted. 'compression': str Type", "buf.write(','.join(headers) + '\\n') _decompress_stream(response, buf, write_bytes=False) buf.seek(0) data = list(csv.reader(buf,", "that Platform does to provide a single output file #", "and be in the same format. Parameters ---------- file_id :", "for i, col in enumerate(column_list): # Avoid mutating input arguments", "``'fail'``. diststyle : str, optional The distribution style for the", "data. The first execution of the custom SQL is controlled", "file delimiter. compression: str The provided value for the file", "be one of {}\" .format(DELIMITERS.keys())) # always set compression to", "read_civis_sql(sql, \"my_database\", use_pandas=True) >>> col_a = df[\"column_a\"] >>> data =", "export_job.id, run_job.id def _get_sql_select(table, columns=None): if columns and not isinstance(columns,", "... _input.write('a,b,c\\\\n1,2,3') >>> fut = civis.io.csv_to_civis('input_file.csv', ... 'my-database', ... 'scratch.my_data')", "file. database : str or int Upload data into this", "default, ``None``, attempts to autodetect whether or not the first", "precision and length # (e.g VARCHAR(42), DECIMAL(8, 10)) tcol_base_type =", "tmp_file: tmp_file.write(headers) _decompress_stream(response, tmp_file) with zipfile.ZipFile(local_path, 'w') as fout: arcname", "client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None,", "# for these possible completed cleaning runs while waiting on", "[DEPRECATED] Whether or not the first row of the file", "not others, or compressions do not match. Examples -------- >>>", "corresponding to the columns in the source file. Each dictionary", "limit 1'.format(sql) fut = query_civis(sql, database, client=client, credential_id=credential_id, polling_interval=polling_interval) headers", "[] for i, col in enumerate(column_list): # Avoid mutating input", "compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) if need_table_columns: table_columns = _replace_null_column_names(table_columns) return cleaned_file_ids,", "delimiter=delimiter) See Also -------- civis.APIClient.scripts.post_sql \"\"\" if client is None:", "= credential_id or client.default_credential # Try to get headers separately.", "= 'column_{}'.format(i) new_cols.append(new_col) return new_cols def _run_cleaning(file_ids, client, need_table_columns, headers,", "number of ' 'columns. Expected {} columns but file {}", "for the output file to have. Default: ``None``. polling_interval :", "'scratch.my_data') >>> fut.result() \"\"\" if client is None: client =", "into this file. 
sql : str The SQL select string", "stream # however, our use of content-encoding is inconsistent chunk", "passed into :func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or passed into", "table_columns = _replace_null_column_names(table_columns) return cleaned_file_ids, headers, compression, delimiter, table_columns def", "} @deprecate_param('v2.0.0', 'api_key') def read_civis(table, database, columns=None, use_pandas=False, job_name=None, api_key=None,", "and the result has been stored as a Civis file.", "\"delayed\" or \"immediate\". If \"immediate\", refresh column statistics as part", "results of a SQL query into memory. civis.io.export_to_civis_file : Store", "in data] >>> df = read_civis(\"schema.table\", \"my_data\", use_pandas=True) >>> col_a", "not actually returned # instead the gzip file is decompressed", "# data at scale. headers = _get_headers(client, sql, db_id, credential_id,", "The import will only copy these columns regardless if there", "civis.io.civis_to_csv : Write directly to a CSV file. civis.io.civis_file_to_table :", "A `CivisFuture` object. Notes ----- This reads the contents of", "delimiter=delimiter, headers=headers, credential_id=credential_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval, hidden=hidden) return", "client, headers, need_table_columns, delimiter): cleaned_file_ids = [] done, still_going =", "part of the run. If \"delayed\", flag the table for", "from civis.io import civis_to_file, file_to_civis, query_civis from civis.utils import run_job", "backslash. Defaults to false. execution: string, optional, default \"immediate\" One", "of rows with errors to remove from the import before", "the table. sortkey2 : str, optional The second column in", "table that uniquely identify a record. If existing_table_rows is \"upsert\",", "contents of this file. database : str or int Upload", "escaped=False, execution=\"immediate\", delimiter=None, headers=None, credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload the contents", "compression: str The provided value for the file compression. output_file_id:", "necessary to account # for these possible completed cleaning runs", "the table. Parameters ---------- table_columns: List[Dict[str, str]] The columns for", "be executed. database : str or int Export data from", "will be passed directly to :func:`~pandas.DataFrame.to_csv`) to modify the column", "object. Examples -------- >>> import pandas as pd >>> df", "schema.table\" >>> fut = export_to_civis_file(sql, \"my_database\") >>> file_id = fut.result()['output'][0][\"file_id\"]", "do not.') if delimiter != detected_info['columnDelimiter']: raise CivisImportError('Provided delimiter \"{}\"", "= [] for fid in file_ids: cleaner_job = client.files.post_preprocess_csv( file_id=fid,", ": Store a SQL query's results in a Civis file", "string, optional, default \"immediate\" One of \"delayed\" or \"immediate\". If", "with a \".\", or else a single table name. Returns", "_decompress_stream(response, buf, write_bytes=True): # use response.raw for a more consistent", "(deprecated) If ``True``, archive the import job as soon as", "headers is None else False csv_settings = dict(include_header=include_header, compression='gzip') script_id,", "headers. The default, ``None``, attempts to autodetect whether or not", "or ``'|'``. Default: ``','``. 
unquoted: bool, optional Whether or not", "first execution of the custom SQL is controlled such that", "for unloading large queries/tables from redshift as it uses a", "queries/tables from redshift as it uses a 'PARALLEL ON' S3", "along with the other values, use `df.reset_index()` instead of `df`", "gzip if compression == 'gzip': with gzip.open(local_path, 'wb') as fout:", "current defined for the table. Parameters ---------- table_columns: List[Dict[str, str]]", "data = read_civis(table, database, columns=columns) >>> columns = data.pop(0) >>>", "Write directly to a CSV file. civis.io.civis_file_to_table : Upload a", "= civis_to_multifile_csv(sql, database, delimiter=delimiter) >>> ids = [entry['id'] for entry", "d = zlib.decompressobj(zlib.MAX_WBITS | 32) else: to_decompress = d.unconsumed_tail +", "provided files will be loaded as an atomic unit in", "of {}\".format( DELIMITERS.keys() ) try: client.get_table_id(table, database) log.debug('Table {table} already", "If ``None``, the default credential will be used. polling_interval :", "a future release, a ``'gzip'`` compressed file will be returned", "import pandas as pd >>> df = pd.DataFrame({'a': [1, 2,", "name or ID. table : str The schema and table", "client, need_table_columns, headers, delimiter, hidden) (cleaned_file_ids, headers, compression, delimiter, cleaned_table_columns)", "delimiter : string, optional The column delimiter. One of ``','``,", "client=client, credential_id=credential_id, polling_interval=polling_interval) headers = fut.result()['result_columns'] except Exception as exc:", "raise EmptyResultError(\"Query {} returned no output.\" .format(script_id)) url = outputs[0][\"path\"]", "= future.result().get(\"output\") if not outputs: warnings.warn(\"Job %s, run %s does", "will be used. api_key : DEPRECATED str, optional Your Civis", "returned for all cases. delimiter: str, optional Which delimiter to", "'split_schema_tablename', 'export_to_civis_file'] DELIMITERS = { ',': 'comma', '\\t': 'tab', '|':", "are exported. use_pandas : bool, optional If ``True``, return a", ">>> civis_to_file(ids[0], buf) >>> buf.seek(0) >>> df = pd.read_csv(buf, delimiter=delimiter)", "is not None: # i.e. it was provided as an", "client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id, sql=sql, hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id) log.debug('Started", "None: delimiter = detected_info['columnDelimiter'] compression = detected_info['compression'] _check_all_detected_info(detected_info, headers, delimiter,", "directly into memory using SQL. civis.io.civis_to_csv : Write directly to", "the table. delimiter : string, optional The column delimiter. One", "match those current defined for the table. Parameters ---------- table_columns:", "_run_cleaning(file_id, client, need_table_columns, headers, delimiter, hidden) (cleaned_file_ids, headers, compression, delimiter,", "of Megabytes each created file will be. unquoted: bool, optional", "df : :class:`pandas:pandas.DataFrame` The `DataFrame` to upload to Civis. database", "is deprecated and will be removed in v2.0.0. \" \"Use", "table you want to upload to. E.g., ``'scratch.table'``. 
client :", "new_cols = [] for i, col in enumerate(column_list): # Avoid", "raise EmptyResultError(\"Unload query {} returned no manifest.\" .format(script_id)) buf =", "3], 'b': [4, 5, 6]}) >>> fut = civis.io.dataframe_to_civis(df, 'my-database',", "polling_interval=polling_interval) log.debug('Started run %d for import %d', fut.run_id, import_job.id) return", "set to False. In a future release, a ``'gzip'`` compressed", "of a length-related import failure loosen_types = len(file_id) > 1", "table) return sql def _get_headers(client, sql, database, credential_id, polling_interval=None): headers", "optional The ID of the database credential. If ``None``, the", "column_list: list[dict] \"\"\" new_cols = [] for i, col in", "import concurrent.futures import csv from os import path import io", ": kwargs Extra keyword arguments are passed into :func:`pandas:pandas.read_csv` if", "if not future.succeeded(): return outputs = future.result().get(\"output\") if not outputs:", "``True``, archive the import job as soon as it completes.", "import run_job from civis._deprecation import deprecate_param import requests try: from", "default credential will be used. polling_interval : int or float,", "query has completed and the result has been stored as", "once to attempt to retrieve headers and once to retrieve", "have the same number of ' 'columns. Expected {} columns", "csv_settings=None): \"\"\"Store results of a query to a Civis file", "still running. for result in concurrent.futures.as_completed(done | still_going): output_file =", "Civis file \"\"\" if archive: warnings.warn(\"`archive` is deprecated and will", "provided, and decreases the # risk of a length-related import", "def read_civis_sql(sql, database, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None, polling_interval=None, archive=False,", "as needed # and perform necessary file cleaning need_table_columns =", "the table. headers : bool, optional [DEPRECATED] Whether or not", "delimiter, output_file_id, detected_info[\"columnDelimiter\"]) ) if compression != detected_info['compression']: raise CivisImportError('Mismatch", "sets, it is possible # That done contains more than", ":envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The ID of", "to :func:`civis.APIClient.scripts.post_sql`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A future which", "to this function. Parameters ---------- df : :class:`pandas:pandas.DataFrame` The `DataFrame`", "be created. file_columns: List[Dict[str, str]] The columns detected by the", "possible. If omitted, all columns are exported. use_pandas : bool,", "dict(include_header=include_header, compression='gzip') script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id,", "want to upload to. E.g., ``'scratch.table'``. Schemas or tablenames with", "True) fut.add_done_callback(f) return fut @deprecate_param('v2.0.0', 'api_key') def civis_to_multifile_csv(sql, database, job_name=None,", "be the database name or ID. table : str The", "staging table to final table after a brief delay, in", "is deprecated, as the name will change in v2.0.0. database", "whether or not the first row contains headers. 
primary_keys: list[str],", "return sql def _get_headers(client, sql, database, credential_id, polling_interval=None): headers =", "api_key client = APIClient(api_key=api_key) sql = _get_sql_select(table, columns) data =", "tcol['sql_type'].split('(', 1)[0] fcol_base_type = fcol['sql_type'].split('(', 1)[0] if tcol_base_type != fcol_base_type:", "name, client=client) delimiter = ',' fut = civis_file_to_table(file_id, database, table,", "from file cleaning with an appropriately blank column name. Parameters", "('s3://...') 'url_signed': str Signed S3 URL ('https://...') 'unquoted': bool Whether", "(cleaned_file_ids, headers, compression, delimiter, cleaned_table_columns) = _process_cleaning_results( cleaning_futures, client, headers,", "hidden=True): \"\"\"Upload the contents of one or more Civis files", "if client is None: client = APIClient(api_key=api_key) if use_pandas and", "that all results from files are correctly accounted for -", "allow the Civis API # to increase these values for", "a Civis file Parameters ---------- sql : str The SQL", "decreases the # risk of a length-related import failure loosen_types", "must be double quoted, e.g. ``'my_schema.\"my.table\"'``. database : str or", "list of dict Each dict has the following keys: 'id':", "(e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- filename : str", "= csv_settings or {} export_job = client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id, sql=sql,", "The columns for the table to be created. file_columns: List[Dict[str,", "import deprecate_param import requests try: from io import StringIO except", "output_file = client.jobs.list_runs_outputs( result.job_id, result.run_id )[0] detected_info = client.files.get(output_file.object_id).detected_info if", "still_going = concurrent.futures.wait( cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED ) # Set values from", "_run_cleaning(file_ids, client, need_table_columns, headers, delimiter, hidden, polling_interval=None): cleaning_futures = []", "DELETE, etc.). Parameters ---------- sql : str The SQL select", "log = logging.getLogger(__name__) __all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv', 'civis_to_multifile_csv', 'dataframe_to_civis',", "precisions/lengths - setting this option will allow the Civis API", "the table for a deferred statistics update; column statistics may", "``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. delimiter: str, optional Which", "as buf: if headers: buf.write(','.join(headers) + '\\n') _decompress_stream(response, buf, write_bytes=False)", "\"\"\"Split a Redshift 'schema.tablename' string Remember that special characters (such", "If multiple files are being imported, there might be differences", "max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None, sortkey1=None, sortkey2=None, table_columns=None, delimiter=\",\", headers=None, primary_keys=None,", "if client is None: client = APIClient() schema, table =", "script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id, hidden=hidden, csv_settings=csv_settings)", "archive=False, hidden=True, **kwargs): \"\"\"Read data from Civis using a custom", "quotechar='\"') schema_name_tup = next(reader) if len(schema_name_tup) == 1: schema_name_tup =", "kwargs Extra keyword arguments are passed into :func:`pandas:pandas.read_csv` if `use_pandas`", "include_header=True, compression='none', delimiter=',', unquoted=False, archive=False, hidden=True, polling_interval=None): \"\"\"Export data from", "file. 
The custom SQL string will be executed twice; once", "multiple files are provided, this limit applies across all files", "in the headers # then response.iter_content will decompress the stream", ":envvar:`CIVIS_API_KEY` environment variable will be used. client : :class:`civis.APIClient`, optional", "- provided compression was {}' ' but detected compression {}.", "credential_id=None, polling_interval=None, hidden=True): \"\"\"Upload the contents of one or more", "dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id = _sql_script(client,", "for the table. Parameters ---------- table_columns: List[Dict[str, str]] The columns", "an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. credential_id", "will be a list of strings. Raises ------ ImportError If", "= False if kwargs.get('header') is False else True with TemporaryDirectory()", "to %s\", str(exc)) return headers def _decompress_stream(response, buf, write_bytes=True): #", "database, columns=columns) >>> columns = data.pop(0) >>> col_a_index = columns.index(\"column_a\")", "number of Megabytes each created file will be. unquoted: bool,", "must be double quoted, e.g. ``'scratch.\"my.table\"'``. api_key : DEPRECATED str,", "modify the column names in the Civis Table. credential_id :", "CivisImportError If the table columns and the file columns have", "from the query. 'entries': list of dict Each dict has", "as tmp_dir: tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv') to_csv_kwargs = {'encoding': 'utf-8',", "The query. 'header': list of str The columns from the", "int File size in bytes 'url': str Unsigned S3 URL", "FutureWarning) name = path.basename(filename) with open(filename, \"rb\") as data: file_id", "it is possible # That done contains more than one", "source, destination, headers, name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter, compression=compression, escaped=escaped, execution=execution,", "column types match those current defined for the table. Parameters", ") if error_msgs: raise CivisImportError( 'Encountered the following errors for", "row) if `use_pandas` is ``False``, otherwise a `pandas` `DataFrame`. Note", "polling_interval=None, archive=False, hidden=True): \"\"\"Upload the contents of a local CSV", ">>> df = read_civis_sql(sql, \"my_database\", use_pandas=True) >>> col_a = df[\"column_a\"]", "is intended for unloading large queries/tables from redshift as it", "gzip import zipfile from civis import APIClient from civis._utils import", "or int Execute the query against this database. Can be", "quoted, e.g. ``'my_schema.\"my.table\"'``. database : str or int Read data", "job completion. hidden : bool, optional If ``True`` (the default),", "and will be removed in v2.0.0. \" \"Use `hidden` instead.\",", "A dictionary of csv_settings to pass to :func:`civis.APIClient.scripts.post_sql`. Returns -------", "all columns are exported. use_pandas : bool, optional If ``True``,", "scale. headers = _get_headers(client, sql, db_id, credential_id, polling_interval) # include_header", "treated as headers. 
The default, ``None``, attempts to autodetect whether", "= client.scripts.post_sql(job_name, remote_host_id=db_id, credential_id=credential_id, sql=sql, hidden=hidden, csv_settings=csv_settings) run_job = client.scripts.post_sql_runs(export_job.id)", "File base type was {}, but expected {}'.format( idx, fcol_base_type,", "Column SQL transformations are possible. If omitted, all columns are", "be used. api_key : DEPRECATED str, optional Your Civis API", "csv_settings : dict, optional A dictionary of csv_settings to pass", "function is intended for unloading large queries/tables from redshift as", "one of {}\".format(DELIMITERS.keys()) csv_settings = dict(include_header=include_header, compression=compression, column_delimiter=delimiter, unquoted=unquoted, filename_prefix=prefix,", "headers. Default: ``True``. compression: str, optional Type of compression to", "field is required. Note that this is true regardless of", "cleaning runs while waiting on those which # are still", "to use as the sortkey for the table. sortkey2 :", ":class:`pandas:pandas.DataFrame` or list A list of rows (with header as", "future which returns the response from :func:`civis.APIClient.scripts.get_sql_runs` after the sql", "the result of SQL query and return presigned urls. This", "the :envvar:`CIVIS_API_KEY`. credential_id : str or int, optional The database", "column(s) of the destination table that uniquely identify a record.", "RuntimeWarning) return else: url = outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported", "_input.write('a,b,c\\\\n1,2,3') >>> fut = civis.io.csv_to_civis('input_file.csv', ... 'my-database', ... 'scratch.my_data') >>>", "``'|'``. Default: ``'|'``. max_file_size: int, optional Maximum number of Megabytes", "of column names. Column SQL transformations are possible. If omitted,", "data: file_id = file_to_civis(data, name, client=client) log.debug('Uploaded file %s to", "If \"immediate\", refresh column statistics as part of the run.", "Upload the contents of this file. database : str or", "the `table` input.\") db_id = client.get_database_id(database) cred_id = credential_id or", "delimiter ) table_columns = table_columns or cleaned_table_columns source = dict(file_ids=cleaned_file_ids)", "match ' 'detected delimiter for {}: \"{}\"'.format( delimiter, output_file_id, detected_info[\"columnDelimiter\"])", "have a type mismatch, or differ in count. \"\"\" if", "response = requests.get(url, stream=True) response.raise_for_status() with StringIO() as buf: if", "hidden=True, **kwargs): \"\"\"Read data from a Civis table. Parameters ----------", "name=import_name, max_errors=max_errors, existing_table_rows=existing_table_rows, column_delimiter=delimiter, compression=compression, escaped=escaped, execution=execution, loosen_types=loosen_types, table_columns=table_columns, redshift_destination_options=redshift_options,", "Set values from first completed file cleaning - other files", "kwargs.get('header') is False else True with TemporaryDirectory() as tmp_dir: tmp_path", "``False``. Returns ------- data : :class:`pandas:pandas.DataFrame` or list A list", "as it completes. hidden : bool, optional If ``True`` (the", "each row will be a list of strings. 
Raises ------", "{ ',': 'comma', '\\t': 'tab', '|': 'pipe', } @deprecate_param('v2.0.0', 'api_key')", "optional Number of seconds to wait between checks for query", "only that the types # share a base type (e.g.", "credential_id=None, polling_interval=None, archive=False, hidden=True): \"\"\"Upload the contents of a local", "read from the DataFrame. Use the `header` parameter (which will", "\"\"\" if client is None: client = APIClient(api_key=api_key) if use_pandas", "A boolean value indicating whether or not the source file(s)", "into memory. Examples -------- >>> with open('input_file.csv', 'w') as _input:", "APIClient() schema, table = split_schema_tablename(table) if isinstance(file_id, int): file_id =", "name. Parameters ---------- column_list: list[dict] the list of columns from", "e.g. ``'scratch.\"my.table\"'``. api_key : DEPRECATED str, optional Your Civis API", "if headers is None: headers = detected_info['includeHeader'] if delimiter is", "if not outputs: raise EmptyResultError(\"Unload query {} returned no manifest.\"", "str The provided value for the file compression. output_file_id: int", "hidden=hidden, csv_settings=csv_settings) fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False)", "the input is only a table name, but the ``tablename``", "prepended to it due to how distributed databases export #", "this file. sql : str The SQL select string to", "'csv_to_civis', 'civis_file_to_table', 'split_schema_tablename', 'export_to_civis_file'] DELIMITERS = { ',': 'comma', '\\t':", "``None``, attempts to autodetect whether or not the first row", "of the `table` input.\") db_id = client.get_database_id(database) cred_id = credential_id", "the query against this database. Can be the database name", "dict(file_ids=cleaned_file_ids) destination = dict(schema=schema, table=table, remote_host_id=db_id, credential_id=cred_id, primary_keys=primary_keys, last_modified_keys=last_modified_keys) redshift_options", "# NOQA log.debug(\"Failed to retrieve headers due to %s\", str(exc))", ": DEPRECATED str, optional Your Civis API key. If not", "= True CHUNK_SIZE = 32 * 1024 log = logging.getLogger(__name__)", ": str or int Execute the query against this database.", "if not delimiter: raise ValueError(\"delimiter must be one of {}\"", "`use_pandas` is ``False``, otherwise a `pandas` `DataFrame`. Note that if", "to use, if any. One of ``'none'``, ``'zip'``, or ``'gzip'``.", "parallel, and should share the same columns in the same", ".format(script_id)) url = outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported results to", ": str Download exported data into this file. sql :", "failing. 
If multiple files are provided, this limit applies across", "\"\"\" if client is None: client = APIClient() schema, table", "table_columns = (detected_info['tableColumns'] if need_table_columns else None) if headers is", "None: client = APIClient(api_key=api_key) delimiter = DELIMITERS.get(delimiter) assert delimiter, \"delimiter", "Raises ------ CivisImportError If the table columns and the file", "= fcol['sql_type'].split('(', 1)[0] if tcol_base_type != fcol_base_type: error_msgs.append( 'Column {}:", ":func:`~pandas.DataFrame.to_csv`) to modify the column names in the Civis Table.", "_check_all_detected_info(detected_info, headers, delimiter, compression, output_file_id): \"\"\"Check a single round of", "fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id), polling_interval=polling_interval, client=client, poll_on_creation=False) outputs =", "tmp_dir: tmp_path = path.join(tmp_dir, 'civis_to_csv.csv') with open(tmp_path, 'wb') as tmp_file:", "The distribution style for the table. One of ``'even'``, ``'all'``", "credential_id : str or int, optional The database credential ID.", "be used. client : :class:`civis.APIClient`, optional If not provided, an", "'api_key') def civis_to_multifile_csv(sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none',", "compression to use, if any. One of ``'none'``, ``'zip'``, or", "enumerate(column_list): # Avoid mutating input arguments new_col = dict(col) if", "use a more performant method for retrieving the data. The", "no output.\" .format(script_id)) url = outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported", "order\"] >>> data = read_civis(table, database, columns=columns) >>> columns =", "32) else: to_decompress = d.unconsumed_tail + chunk if write_bytes: buf.write(d.decompress(to_decompress))", "str]] The columns for the table to be created. file_columns:", "# are still running. for result in concurrent.futures.as_completed(done | still_going):", "whether or not the first row contains headers. credential_id :", "loaded as an atomic unit in parallel, and should share", "or d.unused_data: if d.unused_data: to_decompress = d.unused_data + chunk d", "_sql_script(client=client, sql=sql, database=database, job_name=job_name, credential_id=credential_id, csv_settings=csv_settings, hidden=hidden) fut = CivisFuture(client.scripts.get_sql_runs,", "has the following keys: 'id': int File ID 'name': str", "is not installed.\") if archive: warnings.warn(\"`archive` is deprecated and will", "data : :class:`pandas:pandas.DataFrame` or list A list of rows (with", "for query completion. hidden : bool, optional If ``True`` (the", "def dataframe_to_civis(df, database, table, api_key=None, client=None, max_errors=None, existing_table_rows=\"fail\", diststyle=None, distkey=None,", "double-quotes. Parameters ---------- table: str Either a Redshift schema and", "more columns in the table. headers : bool, optional [DEPRECATED]", "a query to a Civis file Parameters ---------- sql :", "include_header = True if headers is None else False csv_settings", "an atomic unit in parallel, and should share the same", "boolean value indicating whether or not the source file(s) escape", "the source file has quotes escaped with a backslash. Defaults", "after a brief delay, in order to accommodate multiple concurrent", "dictionary should have keys for column \"name\" and \"sqlType\". The", "more Civis files to a Civis table. 
All provided files", "i, col in enumerate(column_list): # Avoid mutating input arguments new_col", "if archive: def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) return fut", "'name': str Filename 'size': int File size in bytes 'url':", "headers : bool, optional Whether or not the first row", "``False``. polling_interval : int or float, optional Number of seconds", "are being imported, there might be differences in # their", "into a schema and table name. \"\"\" reader = csv.reader(StringIO(str(table)),", "the destination table that uniquely identify a record. If existing_table_rows", "= detected_info['includeHeader'] if delimiter is None: delimiter = detected_info['columnDelimiter'] compression", "VARCHAR(42), DECIMAL(8, 10)) tcol_base_type = tcol['sql_type'].split('(', 1)[0] fcol_base_type = fcol['sql_type'].split('(',", "use, if any. One of ``'none'``, ``'zip'``, or ``'gzip'``. Default", "----- This reads the contents of `filename` into memory. Examples", "cleaning results as compared to provided values. Parameters ---------- detected_info:", "If the input ``table`` is not separable into a schema", "if len(table_columns) != len(file_columns): raise CivisImportError('All files should have the", "appear in the Civis UI. Returns ------- results : :class:`~civis.futures.CivisFuture`", "API key. If not given, the :envvar:`CIVIS_API_KEY` environment variable will", "IDs and presigned S3 urls. Parameters ---------- sql : str", "actually returned # instead the gzip file is decompressed during", "Default ``'none'``. delimiter: str, optional Which delimiter to use, if", "job will not appear in the Civis UI. **kwargs :", "= ((not table_exists or existing_table_rows == 'drop') and table_columns is", "used. client : :class:`civis.APIClient`, optional If not provided, an :class:`civis.APIClient`", "an :class:`civis.APIClient` object will be created from the :envvar:`CIVIS_API_KEY`. max_errors", "fut = run_job(import_job.id, client=client, polling_interval=polling_interval) log.debug('Started run %d for import", "= client.scripts.post_sql_runs(export_job.id) log.debug('Started run %d of SQL script %d', run_job.id,", "\"select {} from {}\".format(select, table) return sql def _get_headers(client, sql,", "int The cleaned file's Civis ID. Used for debugging. Raises", "v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) name = path.basename(filename) with", "optional If ``True``, the first line of the CSV will", "the table. primary_keys: list[str], optional A list of the primary", "the Civis UI. **kwargs : kwargs Extra keyword arguments will", "d.unused_data + chunk d = zlib.decompressobj(zlib.MAX_WBITS | 32) else: to_decompress", "list of the primary key column(s) of the destination table", "str Name of table, including schema, in the database. E.g.", "List[Dict[str, str]] The columns for the table to be created.", "compression='none', delimiter=',', unquoted=False, archive=False, hidden=True, polling_interval=None): \"\"\"Export data from Civis", "but file {} ' 'has {} columns'.format( len(table_columns), output_obj_id, len(file_columns))", "= data.pop(0) >>> col_a_index = columns.index(\"column_a\") >>> col_a = [row[col_a_index]", "delimiter: raise ValueError(\"delimiter must be one of {}\" .format(DELIMITERS.keys())) #", "os import shutil from tempfile import TemporaryDirectory import warnings import", "etc.). 
Parameters ---------- sql : str The SQL select string", "int, optional The maximum number of rows with errors to", "for import %d', fut.run_id, import_job.id) return fut def _sql_script(client, sql,", "use of content-encoding is inconsistent chunk = response.raw.read(CHUNK_SIZE) d =", "can only be included in a schema or table name", "this database. Can be the database name or ID. table", "a header or do not.') if delimiter != detected_info['columnDelimiter']: raise", "for the file delimiter. compression: str The provided value for", "the :envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum number of", "reads the data into memory. See Also -------- civis.io.read_civis :", "\"download. Not creating file %s.\" % (job_id, run_id, filename), RuntimeWarning)", "= d.unused_data + chunk d = zlib.decompressobj(zlib.MAX_WBITS | 32) else:", "boolean value indicating whether or not the source file has", "bool, optional Whether or not to quote fields. Default: ``False``.", "Civis file %s', filename, file_id) fut = civis_file_to_table(file_id, database, table,", "the following errors for file {}:\\n\\t{}'.format( output_obj_id, '\\n\\t'.join(error_msgs) ) )", "tablename A 2-tuple of strings. The ``schema`` may be None", "will greatly # reduce the work that Platform does to", "One of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. ``'gzip'`` currently", "False except ImportError: NO_PANDAS = True CHUNK_SIZE = 32 *", "value for the file delimiter. compression: str The provided value", "_check_column_types(table_columns, file_columns, output_file.object_id) _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) if", "a list of results from :func:`python:csv.reader`. job_name : str, optional", "open(local_path, 'wb') as fout: fout.write(headers) _decompress_stream(response, fout) # decompress the", "int File ID 'name': str Filename 'size': int File size", "distkey=None, sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None, execution=\"immediate\", delimiter=None,", "or ``'key'``. distkey : str, optional The column to use", "not isinstance(columns, (list, tuple)): raise TypeError(\"columns must be a list,", "url = outputs[0][\"path\"] file_id = outputs[0][\"file_id\"] log.debug('Exported results to Civis", "TypeError(\"columns must be a list, tuple or None\") select =", "and table name. \"\"\" reader = csv.reader(StringIO(str(table)), delimiter=\".\", doublequote=True, quotechar='\"')", "``','``, ``'\\t'``, or ``'|'``. Default: ``','``. unquoted: bool, optional Whether", "limit applies across all files combined. existing_table_rows : str, optional", "raise CivisImportError('All files should have the same number of '", "table. primary_keys: list[str], optional A list of the primary key", "'none': with open(local_path, 'wb') as fout: fout.write(headers) _decompress_stream(response, fout) #", "the database name or ID. columns : list, optional A", "'api_key') def read_civis(table, database, columns=None, use_pandas=False, job_name=None, api_key=None, client=None, credential_id=None,", "and table_columns is None) cleaning_futures = _run_cleaning(file_id, client, need_table_columns, headers,", "will not appear in the Civis UI. csv_settings : dict,", "`hidden` instead.\", FutureWarning) name = path.basename(filename) with open(filename, \"rb\") as", "6]}) >>> fut = civis.io.dataframe_to_civis(df, 'my-database', ... 


@deprecate_param('v2.0.0', 'api_key')
def read_civis(table, database, columns=None, use_pandas=False,
               job_name=None, api_key=None, client=None, credential_id=None,
               polling_interval=None, archive=False, hidden=True, **kwargs):
    """Read data from a Civis table.

    Parameters
    ----------
    table : str
        Name of table, including schema, in the database. E.g.
        ``'my_schema.my_table'``. Schemas or tablenames with periods must
        be double quoted, e.g. ``'my_schema."my.table"'``.
    database : str or int
        Read data from this database. Can be the database name or ID.
    columns : list, optional
        A list of column names. Column SQL transformations are possible.
        If omitted, all columns are exported.
    use_pandas : bool, optional
        If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
        return a list of results from :func:`python:csv.reader`.
    job_name : str, optional
        A name to give the job. If omitted, a random job name will be used.
    api_key : DEPRECATED str, optional
        Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
        environment variable will be used.
    client : :class:`civis.APIClient`, optional
        If not provided, an :class:`civis.APIClient` object will be
        created from the :envvar:`CIVIS_API_KEY`.
    credential_id : str or int, optional
        The database credential ID. If ``None``, the default credential
        will be used.
    polling_interval : int or float, optional
        Number of seconds to wait between checks for query completion.
    archive : bool, optional (deprecated)
        If ``True``, archive the import job as soon as it completes.
    hidden : bool, optional
        If ``True`` (the default), this job will not appear in the Civis UI.
    **kwargs : kwargs
        Extra keyword arguments are passed into :func:`pandas:pandas.read_csv`
        if `use_pandas` is ``True`` or passed into :func:`python:csv.reader`
        if `use_pandas` is ``False``.

    Returns
    -------
    data : :class:`pandas:pandas.DataFrame` or list
        A list of rows (with header as first row) if `use_pandas` is
        ``False``, otherwise a `pandas` `DataFrame`.

    Raises
    ------
    ImportError
        If `use_pandas` is ``True`` and `pandas` is not installed.

    Examples
    --------
    >>> table = "schema.table"
    >>> database = "my_data"
    >>> columns = ["column_a", "ROW_NUMBER() OVER(ORDER BY date) AS order"]
    >>> data = read_civis(table, database, columns=columns)
    >>> columns = data.pop(0)
    >>> col_a_index = columns.index("column_a")
    >>> col_a = [row[col_a_index] for row in data]

    >>> df = read_civis("schema.table", "my_data", use_pandas=True)
    >>> col_a = df["column_a"]

    See Also
    --------
    civis.io.read_civis_sql : Read directly into memory using SQL.
    civis.io.civis_to_csv : Write directly to csv.
    civis.io.export_to_civis_file : Store a SQL query's results in a
        Civis file.
    """
    if client is None:
        # Instantiate client here in case users provide a (deprecated) api_key
        client = APIClient(api_key=api_key)
    sql = _get_sql_select(table, columns)
    data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas,
                          job_name=job_name, client=client,
                          credential_id=credential_id,
                          polling_interval=polling_interval,
                          archive=archive, hidden=hidden, **kwargs)
    return data
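

# Illustrative sketch (not part of the original module): pulling a small
# slice of a table with ``read_civis``. The table, database, and column
# names below are placeholders, and the snippet is wrapped in a function so
# nothing runs on import. With ``use_pandas=False`` the first returned row
# holds the header strings, per the docstring above.
def _example_read_table_slice():
    # Fetch two columns as a pandas DataFrame (requires pandas).
    df = read_civis("schema.table", "my_database",
                    columns=["column_a", "column_b"],
                    use_pandas=True)
    # Same table as plain lists of strings; split header from body.
    rows = read_civis("schema.table", "my_database", use_pandas=False)
    header, body = rows[0], rows[1:]
    return df, header, body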


def export_to_civis_file(sql, database, job_name=None, client=None,
                         credential_id=None, polling_interval=None,
                         hidden=True, csv_settings=None):
    """Store results of a query to a Civis file.

    Parameters
    ----------
    sql : str
        The SQL select string to be executed.
    database : str or int
        Execute the query against this database. Can be the database name
        or ID.
    job_name : str, optional
        A name to give the job. If omitted, a random job name will be used.
    client : :class:`civis.APIClient`, optional
        If not provided, an :class:`civis.APIClient` object will be
        created from the :envvar:`CIVIS_API_KEY`.
    credential_id : str or int, optional
        The database credential ID. If ``None``, the default credential
        will be used.
    polling_interval : int or float, optional
        Number of seconds to wait between checks for query completion.
    hidden : bool, optional
        If ``True`` (the default), this job will not appear in the Civis UI.
    csv_settings : dict, optional
        A dictionary of csv_settings to pass to
        :func:`civis.APIClient.scripts.post_sql`.

    Returns
    -------
    fut : :class:`~civis.futures.CivisFuture`
        A future which returns the response from
        :func:`civis.APIClient.scripts.get_sql_runs` after the sql query
        has completed and the result has been stored as a Civis file.

    Examples
    --------
    >>> sql = "SELECT * FROM schema.table"
    >>> fut = export_to_civis_file(sql, "my_database")
    >>> file_id = fut.result()['output'][0]["file_id"]

    See Also
    --------
    civis.io.read_civis : Read directly into memory without SQL.
    civis.io.read_civis_sql : Read results of a SQL query into memory.
    civis.APIClient.scripts.post_sql
    """
    client = client or APIClient()
    script_id, run_id = _sql_script(client=client, sql=sql,
                                    database=database, job_name=job_name,
                                    credential_id=credential_id,
                                    csv_settings=csv_settings,
                                    hidden=hidden)
    fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
                      polling_interval=polling_interval, client=client,
                      poll_on_creation=False)
    return fut


def _sql_script(client, sql, database, job_name, credential_id,
                hidden=False, csv_settings=None):
    job_name = maybe_get_random_name(job_name)
    db_id = client.get_database_id(database)
    credential_id = credential_id or client.default_credential
    csv_settings = csv_settings or {}
    export_job = client.scripts.post_sql(job_name,
                                         remote_host_id=db_id,
                                         credential_id=credential_id,
                                         sql=sql,
                                         hidden=hidden,
                                         csv_settings=csv_settings)
    run_job = client.scripts.post_sql_runs(export_job.id)
    log.debug('Started run %d of SQL script %d', run_job.id, export_job.id)
    return export_job.id, run_job.id
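

# Illustrative sketch (not part of the original module): exporting a query
# to a Civis file and streaming the result back down with ``civis_to_file``.
# The SQL, database name, and output path are placeholders; ``fut.result()``
# blocks until the export script finishes, mirroring the docstring example.
def _example_export_query_to_file(local_path="query_output.csv"):
    fut = export_to_civis_file("SELECT * FROM schema.table", "my_database")
    file_id = fut.result()['output'][0]["file_id"]
    with open(local_path, "wb") as fout:
        civis_to_file(file_id, fout)
    return file_id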


@deprecate_param('v2.0.0', 'api_key')
def civis_to_multifile_csv(sql, database, job_name=None, api_key=None,
                           client=None, credential_id=None,
                           include_header=True, compression='none',
                           delimiter='|', max_file_size=None,
                           unquoted=False, prefix=None,
                           polling_interval=None, hidden=True):
    """Unload the result of SQL query and return presigned urls.

    This function is intended for unloading large queries/tables from
    redshift as it uses a 'PARALLEL ON' S3 unload. It returns a similar
    manifest file to conventional S3 UNLOAD statements except the CSV parts
    are accessible via both files endpoint IDs and presigned S3 urls.

    Parameters
    ----------
    sql : str
        The SQL select string to be executed.
    database : str or int
        Execute the query against this database. Can be the database name
        or ID.
    job_name : str, optional
        A name to give the job. If omitted, a random job name will be used.
    credential_id : str or int, optional
        The database credential ID. If ``None``, the default credential
        will be used.
    include_header: bool, optional
        If ``True`` include a key in the returned dictionary containing a
        list of column names. Default: ``True``.
    compression: str, optional
        Type of compression to use, if any. One of ``'none'``, ``'zip'``,
        or ``'gzip'``. Default ``'none'``.
    delimiter: str, optional
        Which delimiter to use, if any. One of ``','``, ``'\\t'``, or
        ``'|'``. Default: ``'|'``.
    max_file_size: int, optional
        Maximum number of Megabytes each created file will be.
    unquoted: bool, optional
        Whether or not to quote fields. Default: ``False``.
    prefix: str, optional
        A user specified filename prefix for the output file to have.
        Default: ``None``.
    polling_interval : int or float, optional
        Number of seconds to wait between checks for query completion.
    hidden : bool, optional
        If ``True`` (the default), this job will not appear in the Civis UI.

    Returns
    -------
    unload_manifest: dict
        A dictionary resembling an AWS manifest file. Has the following keys:

        'query': str
            The query.

        'header': list of str
            The columns from the query.

        'entries': list of dict
            Each dict has the following keys:

            'id': int
                File ID
            'name': str
                Filename
            'size': int
                File size in bytes
            'url': str
                Unsigned S3 URL ('s3://...')
            'url_signed': str
                Signed S3 URL ('https://...')

        'unquoted': bool
            Whether the cells are quoted.
        'compression': str
            Type of compression used.
        'delimiter': str
            Delimiter that separates the cells.

    Examples
    --------
    >>> sql = "SELECT * FROM schema.table"
    >>> delimiter = "|"
    >>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter)
    >>> ids = [entry['id'] for entry in manifest['entries']]
    >>> buf = BytesIO()
    >>> civis_to_file(ids[0], buf)
    >>> buf.seek(0)
    >>> df = pd.read_csv(buf, delimiter=delimiter)

    See Also
    --------
    civis.APIClient.scripts.post_sql
    """
    if client is None:
        client = APIClient(api_key=api_key)
    delimiter = DELIMITERS.get(delimiter)
    assert delimiter, "delimiter must be one of {}".format(DELIMITERS.keys())

    csv_settings = dict(include_header=include_header,
                        compression=compression,
                        column_delimiter=delimiter,
                        unquoted=unquoted,
                        filename_prefix=prefix,
                        force_multifile=True,
                        max_file_size=max_file_size)
    script_id, run_id = _sql_script(client, sql, database, job_name,
                                    credential_id, hidden=hidden,
                                    csv_settings=csv_settings)

    fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
                      polling_interval=polling_interval, client=client,
                      poll_on_creation=False)

    outputs = fut.result()["output"]
    if not outputs:
        raise EmptyResultError("Unload query {} returned no manifest."
                               .format(script_id))

    buf = io.BytesIO()
    civis_to_file(outputs[0]['file_id'], buf, client=client)
    txt = io.TextIOWrapper(buf, encoding='utf-8')
    txt.seek(0)
    unload_manifest = json.load(txt)

    return unload_manifest
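

# Illustrative sketch (not part of the original module): reading the first
# part of a multifile unload back into pandas, following the docstring
# example above. The query and database name are placeholders; repeating the
# loop body over every entry in ``manifest['entries']`` would reassemble the
# full result set.
def _example_read_first_unload_part():
    manifest = civis_to_multifile_csv("SELECT * FROM schema.table",
                                      "my_database", delimiter="|")
    first = manifest['entries'][0]
    buf = io.BytesIO()
    civis_to_file(first['id'], buf)
    buf.seek(0)
    return pd.read_csv(buf, delimiter="|")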


def _get_sql_select(table, columns=None):
    if columns and not isinstance(columns, (list, tuple)):
        raise TypeError("columns must be a list, tuple or None")
    select = ", ".join(columns) if columns is not None else "*"
    sql = "select {} from {}".format(select, table)
    return sql


def split_schema_tablename(table):
    """Split a Redshift 'schema.tablename' string.

    Remember that special characters (such as '.') can only be included
    in a schema or table name if delimited by double-quotes.

    Parameters
    ----------
    table: str
        Either a Redshift schema and table name combined with a ".",
        or else a single table name.

    Returns
    -------
    schema, tablename
        A 2-tuple of strings. The ``schema`` may be None if the input is
        only a table name, but the ``tablename`` will always be filled.

    Raises
    ------
    ValueError
        If the input ``table`` is not separable into a schema and
        table name.
    """
    reader = csv.reader(StringIO(str(table)),
                        delimiter=".",
                        doublequote=True,
                        quotechar='"')
    schema_name_tup = next(reader)
    if len(schema_name_tup) == 1:
        schema_name_tup = (None, schema_name_tup[0])
    if len(schema_name_tup) != 2:
        raise ValueError("Cannot parse schema and table. "
                         "Does '{}' follow the pattern 'schema.table'?"
                         .format(table))
    return tuple(schema_name_tup)
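

# Illustrative sketch (not part of the original module) of how
# ``split_schema_tablename`` treats quoting: a double-quoted name keeps its
# embedded period, while a bare name with no schema comes back with
# ``schema`` set to None.
def _example_split_tablenames():
    assert split_schema_tablename('my_schema.my_table') == \
        ('my_schema', 'my_table')
    # The period inside the quoted part is not treated as a separator.
    assert split_schema_tablename('my_schema."my.table"') == \
        ('my_schema', 'my.table')
    assert split_schema_tablename('just_a_table') == (None, 'just_a_table')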
'entries': list of dict Each dict has the", "in_place=False, detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers, column_delimiter=delimiter, hidden=hidden ) cleaning_futures.append(run_job(cleaner_job.id, client=client, polling_interval=polling_interval))", "polling_interval=None, hidden=True, csv_settings=None): \"\"\"Store results of a query to a", "data def export_to_civis_file(sql, database, job_name=None, client=None, credential_id=None, polling_interval=None, hidden=True, csv_settings=None):", "this job will not appear in the Civis UI. csv_settings", "of ``'none'``, ``'zip'``, or ``'gzip'``. Default ``'none'``. delimiter: str, optional", "provided value for the file compression. output_file_id: int The cleaned", "then response.iter_content will decompress the stream # however, our use", "use_pandas=True) >>> col_a = df[\"column_a\"] See Also -------- civis.io.read_civis_sql :", "headers, need_table_columns, delimiter): cleaned_file_ids = [] done, still_going = concurrent.futures.wait(", "If inconsistencies are detected, raise an error. first_completed = done.pop()", "= maybe_get_random_name(job_name) db_id = client.get_database_id(database) credential_id = credential_id or client.default_credential", "= detected_info['tableColumns'] _check_column_types(table_columns, file_columns, output_file.object_id) _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id)", "provided ' 'compressions - provided compression was {}' ' but", "return _download_file(url, filename, headers, compression) return callback def split_schema_tablename(table): \"\"\"Split", "new_cols def _run_cleaning(file_ids, client, need_table_columns, headers, delimiter, hidden, polling_interval=None): cleaning_futures", "these possible completed cleaning runs while waiting on those which", "max_file_size=None, unquoted=False, prefix=None, polling_interval=None, hidden=True): \"\"\"Unload the result of SQL", "of the primary key column(s) of the destination table that", "Ensure that all results from files are correctly accounted for", "argument is deprecated, as the name will change in v2.0.0.", "database, delimiter=delimiter) >>> ids = [entry['id'] for entry in manifest['entries']]", "not be included. To store the index along with the", "new_col = dict(col) if new_col.get('name') is None: new_col['name'] = 'column_{}'.format(i)", "parameter _kwargs = {'names': headers} _kwargs.update(kwargs) _kwargs['compression'] = 'gzip' data", "columns = data.pop(0) >>> col_a_index = columns.index(\"column_a\") >>> col_a =", "include_header is set to False. In a future release, a", "\"\"\" if client is None: client = APIClient(api_key=api_key) delimiter =", "table. sortkey1 : str, optional The column to use as", "compression == 'gzip': with gzip.open(local_path, 'wb') as fout: fout.write(headers) with", "if the input is only a table name, but the", "return tuple(schema_name_tup) def _replace_null_column_names(column_list): \"\"\"Replace null names in columns from", "data = list(csv.reader(buf, **kwargs)) return data @deprecate_param('v2.0.0', 'api_key') def civis_to_csv(filename,", "force_multifile=False) script_id, run_id = _sql_script(client, sql, db_id, job_name, credential_id, hidden=hidden,", "column names. Column SQL transformations are possible. If omitted, all", "separates the cells. Examples -------- >>> sql = \"SELECT *", "``','``, ``'\\t'``, or ``'|'``. Default: ``'|'``. 
max_file_size: int, optional Maximum", "for whether or not the file contains errors. delimiter: str", "(tcol, fcol) in enumerate(zip(table_columns, file_columns)): # for the purposes of", "effect in versions >= 1.11 and will be removed in", "UI. Returns ------- results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Notes", "get headers separately. In most scenarios this will greatly #", "the API. include_header = True if headers is None else", "stream elif compression == 'none': with open(local_path, 'wb') as fout:", "first completed file cleaning - other files will be #", ": Upload a Civis file to a Civis table \"\"\"", "returned # instead the gzip file is decompressed during download", "Defaults to false. execution: string, optional, default \"immediate\" One of", "and determined to be incompatible for import. This may be", "whether or not the source file(s) escape quotes with a", "dictionary resembling an AWS manifest file. Has the following keys:", "filename, headers, compression): def callback(future): if not future.succeeded(): return outputs", "schema as part of the `table` input.\") db_id = client.get_database_id(database)", "pattern 'schema.table'?\" .format(table)) return tuple(schema_name_tup) def _replace_null_column_names(column_list): \"\"\"Replace null names", "will be used. include_header: bool, optional If ``True``, the first", "True) fut.add_done_callback(f) fut.result() outputs = client.scripts.get_sql_runs(script_id, run_id)[\"output\"] if not outputs:", "or existing_table_rows == 'drop') and table_columns is None) cleaning_futures =", "table. Parameters ---------- table : str Name of table, including", "\", \".join(columns) if columns is not None else \"*\" sql", "v2.0.0. \" \"Use `hidden` instead.\", FutureWarning) headers = False if", "pd.read_csv(url, **_kwargs) else: response = requests.get(url, stream=True) response.raise_for_status() with StringIO()", "``'gzip'``. Default ``'none'``. delimiter: str, optional Which delimiter to use,", "the first line of the CSV will be headers. Default:", ": str Upload the contents of this file. database :", "and table you want to upload to. E.g., ``'scratch.table'``. api_key", "'zip': with TemporaryDirectory() as tmp_dir: tmp_path = path.join(tmp_dir, 'civis_to_csv.csv') with", "to how distributed databases export # data at scale. headers", "export_to_civis_file(sql, database, job_name=None, client=None, credential_id=None, polling_interval=None, hidden=True, csv_settings=None): \"\"\"Store results", "`DataFrame`'s index will not be included. To store the index", "sql query has completed and the result has been stored", "if `use_pandas` is ``False``, otherwise a `pandas` `DataFrame`. Note that", "------- unload_manifest: dict A dictionary resembling an AWS manifest file.", "requires a primary key. last_modified_keys: list[str], optional A list of", "a Civis file to a Civis table \"\"\" client =", "uses a 'PARALLEL ON' S3 unload. It returns a similar", "Civis UI. csv_settings : dict, optional A dictionary of csv_settings", "------- schema, tablename A 2-tuple of strings. 
The ``schema`` may", "or not the source file has quotes escaped with a", "in # their precisions/lengths - setting this option will allow", "of content-encoding is inconsistent chunk = response.raw.read(CHUNK_SIZE) d = zlib.decompressobj(zlib.MAX_WBITS", "= dict(include_header=include_header, compression='gzip', column_delimiter=delimiter, unquoted=unquoted, filename_prefix=None, force_multifile=False) script_id, run_id =", "EmptyResultError(\"Query {} returned no output.\" .format(script_id)) url = outputs[0][\"path\"] file_id", "multiple concurrent imports to the same destination table. polling_interval :", "def split_schema_tablename(table): \"\"\"Split a Redshift 'schema.tablename' string Remember that special", "def f(x): return client.scripts.put_sql_archive(script_id, True) fut.add_done_callback(f) fut.result() outputs = client.scripts.get_sql_runs(script_id,", "headers, compression): response = requests.get(url, stream=True) response.raise_for_status() # gzipped buffers", "is controlled such that changes in state cannot occur (e.g.,", "different which would introduce a breaking change headers = b''", "None: headers = detected_info['includeHeader'] if delimiter is None: delimiter =", "columns have a type mismatch, or differ in count. \"\"\"", "compression. output_file_id: int The cleaned file's Civis ID. Used for", "cells are quoted. 'compression': str Type of compression used. 'delimiter':", "------- fut : :class:`~civis.futures.CivisFuture` A future which returns the response", "polling_interval=polling_interval, client=client, poll_on_creation=False) if archive: def f(x): return client.scripts.put_sql_archive(script_id, True)", "headers separately. In most scenarios this will greatly # reduce", "returned no manifest.\" .format(script_id)) buf = io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client)", "int The file ID under consideration; used for error messaging.", "sortkey1=None, sortkey2=None, table_columns=None, headers=None, credential_id=None, primary_keys=None, last_modified_keys=None, execution=\"immediate\", delimiter=None, polling_interval=None,", "= credential_id or client.default_credential csv_settings = csv_settings or {} export_job", "Also -------- civis.io.read_civis : Read directly into memory without SQL.", "data from Civis using a custom SQL string. The custom", "= APIClient(api_key=api_key) if use_pandas and NO_PANDAS: raise ImportError(\"use_pandas is True", "fut = civis.io.csv_to_civis('input_file.csv', ... 'my-database', ... 'scratch.my_data') >>> fut.result() \"\"\"", "``'zip'``, or ``'gzip'``. Default ``'none'``. delimiter: str, optional Which delimiter", "str or int Upload data into this database. Can be", "CivisImportError If the values detected on the file do not", "want to upload to. E.g., ``'scratch.table'``. client : :class:`civis.APIClient`, optional", "database name or ID. job_name : str, optional A name", "schema, table = split_schema_tablename(table) if isinstance(file_id, int): file_id = [file_id]", "= \"my_database\" >>> delimiter = \"|\" >>> manifest = civis_to_multifile_csv(sql,", "not be available for up to 24 hours. In addition,", "of dictionaries corresponding to the columns in the source file.", "the following keys: 'query': str The query. 
'header': list of", "file %s', filename, file_id) fut = civis_file_to_table(file_id, database, table, client=client,", "in bytes 'url': str Unsigned S3 URL ('s3://...') 'url_signed': str", "to_csv_kwargs = {'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _,", "results : :class:`~civis.futures.CivisFuture` A `CivisFuture` object. Notes ----- This reads", "delimiters are different, headers are present in some but not", "mismatch, or differ in count. \"\"\" if len(table_columns) != len(file_columns):", "database=database, use_pandas=use_pandas, job_name=job_name, client=client, credential_id=credential_id, polling_interval=polling_interval, archive=archive, hidden=hidden, **kwargs) return", "is performed and each row will be a list of", "primary_keys=primary_keys, last_modified_keys=last_modified_keys, escaped=escaped, execution=execution, polling_interval=polling_interval, hidden=hidden) return fut @deprecate_param('v2.0.0', 'file_id')", "table to final table after a brief delay, in order", "to :meth:`pandas:pandas.DataFrame.to_csv`. Returns ------- fut : :class:`~civis.futures.CivisFuture` A `CivisFuture` object.", "for the table to be created. file_columns: List[Dict[str, str]] The", "\"SELECT * FROM schema.table\" >>> fut = civis_to_csv(\"file.csv\", sql, \"my_database\")", "run_id, filename, headers, compression): def callback(future): if not future.succeeded(): return", "defined for the table. Parameters ---------- table_columns: List[Dict[str, str]] The", "files will be loaded as an atomic unit in parallel,", "in file_ids: cleaner_job = client.files.post_preprocess_csv( file_id=fid, in_place=False, detect_table_columns=need_table_columns, force_character_set_conversion=True, include_header=headers,", "run_job.id def _get_sql_select(table, columns=None): if columns and not isinstance(columns, (list,", "detected on the file do not match their expected attributes.", "ValueError(\"delimiter must be one of {}\" .format(DELIMITERS.keys())) # always set", "redshift_destination_options=redshift_options, hidden=hidden ) fut = run_job(import_job.id, client=client, polling_interval=polling_interval) log.debug('Started run", "no manifest.\" .format(script_id)) buf = io.BytesIO() civis_to_file(outputs[0]['file_id'], buf, client=client) txt", "attempt to auto-detect. headers : bool, optional Whether or not", "but pandas is not installed.\") if archive: warnings.warn(\"`archive` is deprecated", "not have any output to \" \"download. Not creating file", "soon as it completes. hidden : bool, optional If ``True``", "periods must be double quoted, e.g. ``'my_schema.\"my.table\"'``. database : str", "file_id : int or list[int] Civis file ID or a", "col_a = df[\"column_a\"] >>> data = read_civis_sql(sql, \"my_database\") >>> columns", "credential_id = credential_id or client.default_credential # don't fix bug that", "fut = civis.io.dataframe_to_civis(df, 'my-database', ... 'scratch.df_table') >>> fut.result() See Also", "sql, database, job_name=None, api_key=None, client=None, credential_id=None, include_header=True, compression='none', delimiter=',', unquoted=False,", "a Civis table. The `DataFrame`'s index will not be included.", "to a CSV file. \"\"\" if client is None: client", "appropriately blank column name. 
Parameters ---------- column_list: list[dict] the list", "= detected_info['columnDelimiter'] compression = detected_info['compression'] _check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id)", "open('input_file.csv', 'w') as _input: ... _input.write('a,b,c\\\\n1,2,3') >>> fut = civis.io.csv_to_civis('input_file.csv',", "Number of seconds to wait between checks for query completion.", "the sql query has completed and the result has been", ">>> delimiter = \"|\" >>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter)", "schema, in the database. E.g. ``'my_schema.my_table'``. Schemas or tablenames with", "`pandas` `DataFrame` into a Civis table. The `DataFrame`'s index will", "civis.io.export_to_civis_file : Store a SQL query's results in a Civis", "database : str or int Execute the query against this", "StringIO() as buf: if headers: buf.write(','.join(headers) + '\\n') _decompress_stream(response, buf,", "be. unquoted: bool, optional Whether or not to quote fields.", "{'encoding': 'utf-8', 'index': False} to_csv_kwargs.update(kwargs) df.to_csv(tmp_path, **to_csv_kwargs) _, name =", "escaped=False, execution=\"immediate\", credential_id=None, polling_interval=None, archive=False, hidden=True): \"\"\"Upload the contents of", "table, client=client, max_errors=max_errors, existing_table_rows=existing_table_rows, diststyle=diststyle, distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, table_columns=table_columns, delimiter=delimiter,", "and should share the same columns in the same order,", "a custom SQL string. The custom SQL string will be", "file to conventional S3 UNLOAD statements except the CSV parts", "file will be. unquoted: bool, optional Whether or not to", "file Parameters ---------- sql : str The SQL select string", "columns from the query. 'entries': list of dict Each dict", "optional If ``True`` include a key in the returned dictionary", "it was provided as an argument delimiter = DELIMITERS.get(delimiter) assert", "output_file.object_id) cleaned_file_ids.append(output_file.object_id) # Ensure that all results from files are", "headers: buf.write(','.join(headers) + '\\n') _decompress_stream(response, buf, write_bytes=False) buf.seek(0) data =", "'ab') as fout: shutil.copyfileobj(response.raw, fout, CHUNK_SIZE) # write headers and", "# for the purposes of type checking, we care only", "-------- >>> sql = \"SELECT * FROM schema.my_big_table\" >>> database", "and table you want to upload to. E.g., ``'scratch.table'``. Schemas", "cannot occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters ---------- filename", "{} from {}\".format(select, table) return sql def _get_headers(client, sql, database,", "fields. Default: ``False``. prefix: str, optional A user specified filename", "regardless if there are more columns in the table. primary_keys:", "= _sql_script(client, sql, db_id, job_name, credential_id, csv_settings=csv_settings, hidden=hidden) fut =", "_check_all_detected_info(detected_info, headers, delimiter, compression, output_file.object_id) cleaned_file_ids.append(output_file.object_id) if need_table_columns: table_columns =", "of whether the destination database itself requires a primary key.", "default credential will be used. 
primary_keys: list[str], optional A list", "None if the input is only a table name, but", "raise ImportError(\"use_pandas is True but pandas is not installed.\") if", "from first completed file cleaning - other files will be", "instead.\", FutureWarning) name = path.basename(filename) with open(filename, \"rb\") as data:", "DELIMITERS.get(delimiter) assert delimiter, \"delimiter must be one of {}\".format( DELIMITERS.keys()", "removed in v2.0. Tables will always be written with column", "``'|'``. headers : bool, optional Whether or not the first", "log.debug(\"Failed to retrieve headers due to %s\", str(exc)) return headers", "if delimiter is None: delimiter = detected_info['columnDelimiter'] compression = detected_info['compression']", "the default credential will be used. polling_interval : int or", "state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.). Parameters ----------", "need_table_columns else None) if headers is None: headers = detected_info['includeHeader']", "but expected {}'.format( idx, fcol_base_type, tcol_base_type ) ) if error_msgs:", "%s, run %s does not have any output to \"", "environment variable will be used. client : :class:`civis.APIClient`, optional If", "execution of the custom SQL is controlled such that changes", "int, optional The ID of the database credential. If ``None``,", "write_bytes=True): # use response.raw for a more consistent approach #", "last_modified_keys=last_modified_keys) redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2], diststyle=diststyle) # If multiple", "Civis API # to increase these values for the data", "read_civis_sql(sql, \"my_database\") >>> columns = data.pop(0) >>> col_a_index = columns.index(\"column_a\")", "unload; the output format # is different which would introduce", "== 'gzip' and include_header: compression = 'none' # don't support", "no effect in versions >= 1.11 and will be removed", "for entry in manifest['entries']] >>> buf = BytesIO() >>> civis_to_file(ids[0],", "= pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}) >>>", "of Civis file IDs. Reference by name to this argument", "unloading large queries/tables from redshift as it uses a 'PARALLEL", "to upload to. E.g., ``'scratch.table'``. client : :class:`civis.APIClient`, optional If", "escaped=escaped, execution=execution, loosen_types=loosen_types, table_columns=table_columns, redshift_destination_options=redshift_options, hidden=hidden ) fut = run_job(import_job.id,", "list of columns from file cleaning. Returns -------- column_list: list[dict]", "detected_info['compression']) ) def _process_cleaning_results(cleaning_futures, client, headers, need_table_columns, delimiter): cleaned_file_ids =", "the same precision and length # (e.g VARCHAR(42), DECIMAL(8, 10))", "UPDATE, DELETE, etc.). Parameters ---------- sql : str The SQL", "from the :envvar:`CIVIS_API_KEY`. max_errors : int, optional The maximum number", "is set to False. In a future release, a ``'gzip'``", "_sql_script(client, sql, database, job_name, credential_id, hidden=False, csv_settings=None): job_name = maybe_get_random_name(job_name)", "poll_on_creation=False) download = _download_callback(script_id, run_id, filename, headers, compression) fut.add_done_callback(download) if", "for all cases. 
delimiter: str, optional Which delimiter to use,", "import civis_to_file, file_to_civis, query_civis from civis.utils import run_job from civis._deprecation", "_, name = split_schema_tablename(table) file_id = file_to_civis(tmp_path, name, client=client) delimiter", "new_col['name'] = 'column_{}'.format(i) new_cols.append(new_col) return new_cols def _run_cleaning(file_ids, client, need_table_columns,", "this database. Can be the database name or ID. job_name", "decompress the stream # however, our use of content-encoding is", "first row of the file should be treated as headers.", "the headers # then response.iter_content will decompress the stream #", "same order, and be in the same format. Parameters ----------" ]
[ "from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import ( BADGE,", "self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLength(2)", "except PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self): strategy =", "'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy =", "strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size)", "strategy = ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class", "strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str =", "self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding',", "def test_init(self): strategy = BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy", "ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50,", "PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self): strategy = BADGE(2)", "unittest import pytest from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies", "strategy.device) def test_init(self): strategy = ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes)", "self.assertEqual('cuda', strategy.device) def test_init(self): strategy = ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4,", "= 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy", "strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self): strategy = ExpectedGradientLength(4,", "BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy = BADGE(2) expected_str =", "'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def", "= ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding',", "class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes)", "ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device)", "def 
test_init_default(self): strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda',", "self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def test_init(self):", "= BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy = BADGE(2) expected_str", "strategy = ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu',", "BADGETest(unittest.TestCase): def test_init_default(self): strategy = BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self):", "ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name)", "pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self): strategy = BADGE(2) self.assertEqual(2,", "@pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self): strategy = BADGE(2) self.assertEqual(2, strategy.num_classes)", "self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy =", "class BADGETest(unittest.TestCase): def test_init_default(self): strategy = BADGE(2) self.assertEqual(2, strategy.num_classes) def", "= BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase):", "self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy =", "BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self): strategy = BADGE(4) self.assertEqual(4, strategy.num_classes)", "strategy = BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class", "def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy))", "= 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy", "batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) self.assertEqual('embedding', strategy.layer_name)", "str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLength(2) self.assertEqual(2,", "expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self):", "@pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2,", "self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy = BADGE(2) expected_str = 'BADGE(num_classes=2)'", "strategy.device) 
self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100,", "test_init(self): strategy = BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy =", "self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self): strategy = ExpectedGradientLength(4, batch_size=100,", "( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase):", "test_init_default(self): strategy = BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self): strategy =", "test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda',", "test_init(self): strategy = ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size)", "ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def", "strategy = BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self): strategy = BADGE(4)", "def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size)", "test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100,", "strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2)", "def test_badge_str(self): strategy = BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy))", "self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str", "self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2,", "strategy.device) def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str,", "test_badge_str(self): strategy = BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch", "def test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes)", "device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy", "small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch", "ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self): strategy", "class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): 
strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50,", "from small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass", "= ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self):", "strategy.layer_name) def test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4,", "BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def", "self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()'", "batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def test_expected_gradient_length_str(self):", "strategy = BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self): strategy = BADGE(2)", "strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy = ExpectedGradientLengthMaxWord(4,", "strategy.num_classes) def test_badge_str(self): strategy = BADGE(2) expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str,", "@pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes)", "strategy = ExpectedGradientLengthMaxWord(2, 'embedding') self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device)", "BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def", "self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu')", "try: from small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError:", "strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self): strategy = ExpectedGradientLength(4, batch_size=100, device='cpu')", "test_init_default(self): strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device)", "def test_init_default(self): strategy = BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self): strategy", "small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength,", "ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size)", "<reponame>chschroeder/small-text<filename>tests/unit/small_text/integrations/pytorch/test_strategies.py import unittest import pytest from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try:", "self.assertEqual(2, strategy.num_classes) self.assertEqual(50, 
strategy.batch_size) self.assertEqual('cuda', strategy.device) def test_init(self): strategy =", "PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except", "'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) self.assertEqual('embedding',", "ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self): strategy", "'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self): strategy =", "import unittest import pytest from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from", "= BADGE(2) self.assertEqual(2, strategy.num_classes) def test_init(self): strategy = BADGE(4) self.assertEqual(4,", "ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch class BADGETest(unittest.TestCase): def test_init_default(self):", "import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord)", "ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device) def", "import ( BADGE, ExpectedGradientLength, ExpectedGradientLengthMaxWord) except PytorchNotFoundError: pass @pytest.mark.pytorch class", "def test_init(self): strategy = ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100,", "expected_str = 'BADGE(num_classes=2)' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthTest(unittest.TestCase): def test_init_default(self):", "import pytest from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import", "test_expected_gradient_length_str(self): strategy = ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch", "= ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu',", "self.assertEqual(2, strategy.num_classes) def test_init(self): strategy = BADGE(4) self.assertEqual(4, strategy.num_classes) def", "pytest from small_text.integrations.pytorch.exceptions import PytorchNotFoundError try: from small_text.integrations.pytorch.query_strategies import (", "= ExpectedGradientLength(4, batch_size=100, device='cpu') self.assertEqual(4, strategy.num_classes) self.assertEqual(100, strategy.batch_size) self.assertEqual('cpu', strategy.device)", "= ExpectedGradientLength(2) expected_str = 'ExpectedGradientLength()' self.assertEqual(expected_str, str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase):", "strategy.num_classes) def test_init(self): strategy = BADGE(4) self.assertEqual(4, strategy.num_classes) def test_badge_str(self):", "strategy = ExpectedGradientLength(2) self.assertEqual(2, strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', 
strategy.device) def", "str(strategy)) @pytest.mark.pytorch class ExpectedGradientLengthMaxWordTest(unittest.TestCase): def test_init_default(self): strategy = ExpectedGradientLengthMaxWord(2, 'embedding')", "strategy.num_classes) self.assertEqual(50, strategy.batch_size) self.assertEqual('cuda', strategy.device) self.assertEqual('embedding', strategy.layer_name) def test_init(self): strategy" ]
[ "[ '2e2e34343636', 'cccc00000000', '4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353',", "16) g = int(c[4:8][:2], 16) b = int(c[8:][:2], 16) return", "'4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f',", "'d3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ]", "'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ] def parse_tango_color(c): r =", "def apply_color(cfg, color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color", "= parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color for i", "TANGO_PALLETE = [ '2e2e34343636', 'cccc00000000', '4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a',", "color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color", "'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ] def parse_tango_color(c):", "parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color for i in range(len(TANGO_PALLETE)): if i", "'060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec',", "'72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ] def parse_tango_color(c): r = int(c[:4][:2],", "= cfg.default_foreground_color for i in range(len(TANGO_PALLETE)): if i < len(color_table):", "[r, g, b, 0xFF] def apply_color(cfg, color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec')", "def parse_tango_color(c): r = int(c[:4][:2], 16) g = int(c[4:8][:2], 16)", "'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ] def parse_tango_color(c): r = int(c[:4][:2], 16)", "apply_color(cfg, color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color =", "cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color for", "g, b, 0xFF] def apply_color(cfg, color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color", "'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf',", "<gh_stars>100-1000 TANGO_PALLETE = [ '2e2e34343636', 'cccc00000000', '4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b',", "] def parse_tango_color(c): r = int(c[:4][:2], 16) g = int(c[4:8][:2],", "'34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8',", "= parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color for i in range(len(TANGO_PALLETE)): if", "= int(c[8:][:2], 16) return [r, g, b, 0xFF] def apply_color(cfg,", 
"'757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2',", "'555557575353', 'efef29292929', '8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ] def", "cfg.default_cursor_color = cfg.default_foreground_color for i in range(len(TANGO_PALLETE)): if i <", "r = int(c[:4][:2], 16) g = int(c[4:8][:2], 16) b =", "b = int(c[8:][:2], 16) return [r, g, b, 0xFF] def", "cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color for i in range(len(TANGO_PALLETE)):", "for i in range(len(TANGO_PALLETE)): if i < len(color_table): color_table[i] =", "16) b = int(c[8:][:2], 16) return [r, g, b, 0xFF]", "'eeeeeeeeecec', ] def parse_tango_color(c): r = int(c[:4][:2], 16) g =", "0xFF] def apply_color(cfg, color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232')", "int(c[4:8][:2], 16) b = int(c[8:][:2], 16) return [r, g, b,", "return [r, g, b, 0xFF] def apply_color(cfg, color_table): cfg.default_foreground_color =", "b, 0xFF] def apply_color(cfg, color_table): cfg.default_foreground_color = parse_tango_color('eeeeeeeeecec') cfg.default_background_color =", "int(c[8:][:2], 16) return [r, g, b, 0xFF] def apply_color(cfg, color_table):", "int(c[:4][:2], 16) g = int(c[4:8][:2], 16) b = int(c[8:][:2], 16)", "'2e2e34343636', 'cccc00000000', '4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929',", "= [ '2e2e34343636', 'cccc00000000', '4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf',", "parse_tango_color('eeeeeeeeecec') cfg.default_background_color = parse_tango_color('323232323232') cfg.default_cursor_color = cfg.default_foreground_color for i in", "'cccc00000000', '4e4e9a9a0606', 'c4c4a0a00000', '34346565a4a4', '757550507b7b', '060698989a9a', 'd3d3d7d7cfcf', '555557575353', 'efef29292929', '8a8ae2e23434',", "i in range(len(TANGO_PALLETE)): if i < len(color_table): color_table[i] = parse_tango_color(TANGO_PALLETE[i])", "= int(c[:4][:2], 16) g = int(c[4:8][:2], 16) b = int(c[8:][:2],", "g = int(c[4:8][:2], 16) b = int(c[8:][:2], 16) return [r,", "16) return [r, g, b, 0xFF] def apply_color(cfg, color_table): cfg.default_foreground_color", "= int(c[4:8][:2], 16) b = int(c[8:][:2], 16) return [r, g,", "'8a8ae2e23434', 'fcfce9e94f4f', '72729f9fcfcf', 'adad7f7fa8a8', '3434e2e2e2e2', 'eeeeeeeeecec', ] def parse_tango_color(c): r", "'3434e2e2e2e2', 'eeeeeeeeecec', ] def parse_tango_color(c): r = int(c[:4][:2], 16) g", "cfg.default_foreground_color for i in range(len(TANGO_PALLETE)): if i < len(color_table): color_table[i]", "parse_tango_color(c): r = int(c[:4][:2], 16) g = int(c[4:8][:2], 16) b" ]
[ "as error: return authorization.handle_error_response(request, error) class RequestOriginVerifier: async def create_response(self,", "class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant): def authenticate_token_endpoint_client(self): # Must override this to set", "from . import oauth2_key from .user_helper import UserWithRoles USERS_SCOPE =", "BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator), ) class OpenIDSessionState: def __call__(self, grant: BaseGrant):", "status_code, body, headers = error( translations=self.get_translations(request), error_uris=self.get_error_uris(request) ) headers =", "is None else {'group_type': group_type} value = [ group['group_name'] if", "[client.get_client_id()] jwt_config['auth_time'] = int(time.time()) user_info = {'sub': user.user.id, 'roles': user.roles}", "group_by_name else group['_id'] async for group in async_user_group_collection.find( {'_id': {'$in':", "not payload return RedirectResponse(status_code=status_code, headers=headers) assert False def handle_error_response(self, request:", "DbToken.validate_document(token_data) try: if request.client_id is None: request.data['client_id'] = token.client_id elif", "elif headers.get('Location'): assert not body return ErrorRedirectResponse( status_code=status_code, headers=headers, )", "class RevocationEndpoint: async def create_response( self, raw_token: str, token_type_hint: Optional[TypeHint],", "# {'client_id': request.client_id, 'nonce': nonce}, # limit=1, # ) mod_result", "raise InsufficientScopeError('Missing \"*users\" scope', request.uri) user = await UserWithRoles.async_load(user_id, client_id)", "str(grant.request.user.last_modified) return token # support all openid grants authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(),", "USERS_SCOPE not in scope_to_list(scopes): raise InsufficientScopeError('Missing \"*users\" scope', request.uri) user", "query_client, save_token, BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator), ) class OpenIDSessionState: def __call__(self,", "request.token.client_id) user_info = await self.async_generate_user_info(request.user, request.token.scope) return JSONResponse(user_info) except OAuth2Error", "datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)), 'scope': request.scope, 'auth_time': request.credential.get_auth_time(), **token })", "status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]): headers = dict(headers)", "request.token.client_id scopes = request.token.scope scope = USERS_SCOPE load_roles = False", "config.oauth2.token_expiration.authorization_code def __call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope:", "request: TypedRequest): return save_authorization_code(code, request) def query_authorization_code(self, code: str, client:", "None: raise HTTPException(403, \"Invalid token\") client_id = request.token.client_id scopes =", "__call__(self, grant: BaseGrant): grant.register_hook('process_token', self.process_token) def process_token(self, grant: BaseGrant, token:", ") -> Response: token_data = None if token_type_hint is None", "async_token_collection.find_one({'_id': raw_token}) if token_data is None and (token_type_hint is None", "from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \\", "read_schema()): if not hasattr(user.user, prop.key): continue value = 
getattr(user.user, prop.key,", "request.token.client_id scopes = request.token.scope scope = USERS_SCOPE else: client_id =", "as _RefreshTokenGrant, BaseGrant, ) from authlib.oauth2.rfc6749.grants import ( ResourceOwnerPasswordCredentialsGrant as", "{'revoked': True}) await async_token_collection.delete_one({'_id': token.access_token}) return Response() except OAuth2Error as", "uri does not match request\") return None except OAuth2Error as", "authorization.handle_error_response(request, error) class OtherUserInspection(UserInfoMixin): async def create_response(self, request: TypedRequest, user_id:", "{'$set': {'nonce': None}}, ) if mod_result.modified_count != 1: return False", "\"access_token\" RefreshToken = \"refresh_token\" class RevocationEndpoint: async def create_response( self,", "UserPropertyType.groups: group_filter = {} if group_type is None else {'group_type':", "token_data is None: return None token = DbToken.validate_document(token_data) if client_user_cache_collection.count_documents({", "scope_to_list from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken,", "BearerToken(_BearerToken): def __call__(self, client, grant_type, user=None, scope=None, expires_in=None, include_refresh_token=True): if", "# Must override this to set the client in the", "USERS_SCOPE not in scope_to_list(scopes): raise InsufficientScopeError('Missing \"*users\" scope', request.uri) user_infos", "scopes = request.token.scope scope = USERS_SCOPE load_roles = False else:", "= await UserWithRoles.async_load(request.token.user_id, request.token.client_id) user_info = await self.async_generate_user_info(request.user, request.token.scope) return", "jwt_token_expiration = config.oauth2.token_expiration.implicit def generate_authorization_code(self) -> str: return generate_token(config.oauth2.authorization_code_length) def", "user_manager.common.mongo import authorization_code_collection, token_collection, \\ client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \\", "schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name) for scope_name in scope_list if scope_name not", "in scope_to_list(scopes): raise InsufficientScopeError('Missing \"*users\" scope', request.uri) user = await", "if group_type is None else {'group_type': group_type} value = [", "elif headers.get('Location'): assert not payload return RedirectResponse(status_code=status_code, headers=headers) assert False", "+ timedelta(seconds=token.get('expires_in', 0)), 'scope': request.scope, 'auth_time': request.credential.get_auth_time(), **token }) token_collection.insert_one(token_data.document())", "= request.token.scope scope = USERS_SCOPE load_roles = False else: client_id", "client: DbClient, grant_type: str, user: UserWithRoles, scope: str): jwt_config =", "str): client_data = await async_client_collection.find_one({'_id': client_id}) if client_data is None:", "def process_token(self, grant: BaseGrant, token: dict): scope = token.get('scope') if", "\"Invalid token\") client_id = request.token.client_id scopes = request.token.scope scope =", "str) -> Optional[Response]: try: assert isinstance(request, OAuth2Request) request.token = await", "raise HTTPException(404, \"User not found\") user_info = await self.async_generate_user_info(user, scope)", "= self.get_jwt_config() jwt_config['aud'] = [client.get_client_id()] jwt_config['auth_time'] = int(time.time()) user_info =", "Create HttpRequest with json in body. 
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional, Tuple, Dict, Any, Union

import time
from authlib.common.security import generate_token
from authlib.consts import default_json_headers
from authlib.oauth2 import (
    OAuth2Request,
    AuthorizationServer as _AuthorizationServer,
    ResourceProtector as _ResourceProtector,
    OAuth2Error,
    HttpRequest,
)
from authlib.oauth2.rfc6749 import InvalidClientError
from authlib.oauth2.rfc6749.grants import (
    AuthorizationCodeGrant as _AuthorizationCodeGrant,
    RefreshTokenGrant as _RefreshTokenGrant,
    BaseGrant,
)
from authlib.oauth2.rfc6749.grants import (
    ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant,
)
from authlib.oauth2.rfc6749.util import scope_to_list
from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \
    InsufficientScopeError
from authlib.oauth2.rfc8414 import AuthorizationServerMetadata
from authlib.oidc.core import UserInfo
from authlib.oidc.core.grants import (
    OpenIDCode as _OpenIDCode,
    OpenIDImplicitGrant as _OpenIDImplicitGrant,
    OpenIDHybridGrant as _OpenIDHybridGrant,
)
from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token
from fastapi import HTTPException
from starlette.concurrency import run_in_threadpool
from starlette.responses import Response, JSONResponse

from user_manager.common.config import config
from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \
    UserPropertyType
from user_manager.common.mongo import authorization_code_collection, token_collection, \
    client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \
    async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema

from . import oauth2_key
from .user_helper import UserWithRoles

USERS_SCOPE = '*users'


class TypedRequest(OAuth2Request):
    user: UserWithRoles
    credential: Union[DbAuthorizationCode, DbToken]
    client: DbClient


class RedirectResponse(Response):
    def to_json_response(self) -> JSONResponse:
        return JSONResponse(
            content={'redirect_uri': self.headers['Location']},
            status_code=200,
            headers=dict(default_json_headers),
        )


class ErrorJSONResponse(JSONResponse):
    pass


class ErrorRedirectResponse(RedirectResponse):
    def to_json_response(self) -> JSONResponse:
        return ErrorJSONResponse(
            content={'redirect_uri': self.headers['Location']},
            status_code=401,
            headers=dict(default_json_headers),
        )


class AuthorizationServer(_AuthorizationServer):
    metadata_class = AuthorizationServerMetadata

    def create_oauth2_request(self, request: TypedRequest):
        assert isinstance(request, OAuth2Request)
        return request

    def create_json_request(self, request):
        assert isinstance(request, HttpRequest)
        raise NotImplementedError()
        # TODO: Create HttpRequest with json in body.

    def handle_response(self, status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]):
        headers = dict(headers)
        if isinstance(payload, dict):
            return JSONResponse(payload, status_code=status_code, headers=headers)
        elif headers.get('Location'):
            assert not payload
            return RedirectResponse(status_code=status_code, headers=headers)
        assert False

    def handle_error_response(self, request: TypedRequest, error: OAuth2Error):
        status_code, body, headers = error(
            translations=self.get_translations(request),
            error_uris=self.get_error_uris(request)
        )
        headers = dict(headers)
        if isinstance(body, dict):
            return ErrorJSONResponse(
                content=body,
                status_code=status_code,
                headers=headers,
            )
        elif headers.get('Location'):
            assert not body
            return ErrorRedirectResponse(
                status_code=status_code,
                headers=headers,
            )
        assert False


def save_authorization_code(code: str, request: TypedRequest):
    nonce = request.data.get('nonce')
    item = DbAuthorizationCode(
        code=code,
        client_id=request.client.id,
        redirect_uri=request.redirect_uri,
        scope=request.scope,
        user_id=request.user.user.id,
        nonce=nonce,
        auth_time=int(time.time()),
        expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code),
    )
    authorization_code_collection.insert_one(item.document())
    return item


class ExistsNonceMixin(object):
    def exists_nonce(self, nonce: str, request: TypedRequest):
        # exists = mongo.authorization_code_collection.count_documents(
        #     {'client_id': request.client_id, 'nonce': nonce},
        #     limit=1,
        # )
        mod_result = authorization_code_collection.update_one(
            {'client_id': request.client_id, 'nonce': nonce},
            {'$set': {'nonce': None}},
        )
        if mod_result.modified_count != 1:
            return False
        return True


class JwtConfigMixin(object):
    jwt_token_expiration: int

    def get_jwt_config(self, *args, **kwargs):
        return {
            'key': oauth2_key.key.key,
            'alg': oauth2_key.key.jwk.alg.value,
            'iss': config.oauth2.issuer,
            'exp': self.jwt_token_expiration,
        }


class UserInfoMixin(object):
    def _translate_properties(
            self,
            scope: str,
            schema: DbManagerSchema,
    ) -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]:
        scope_list = ['*'] + scope_to_list(scope)
        return [
            (prop.valid_key, schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name)
            for scope_name in scope_list
            if scope_name not in ('openid', 'offline_access') and scope_name in schema.scopes_by_key
            for prop in schema.scopes_by_key[scope_name].properties
            if prop.user_property in schema.properties_by_key
        ]

    def generate_user_info(self, user: UserWithRoles, scope: str):
        user_data = {
            'roles': user.roles,
        }
        for key, prop, group_type, group_by_name in self._translate_properties(scope, read_schema()):
            if not hasattr(user.user, prop.key):
                continue
            value = getattr(user.user, prop.key, None)
            if prop.type == UserPropertyType.picture:
                if value is not None:
                    value = f"{config.oauth2.base_url}/picture/{value}"
            elif prop.type == UserPropertyType.groups:
                group_filter = {} if group_type is None else {'group_type': group_type}
                value = [
                    group['group_name'] if group_by_name else group['_id']
                    for group in user_group_collection.find(
                        {'_id': {'$in': value}, 'visible': True, **group_filter},
                        projection={'group_name' if group_by_name else '_id': 1}
                    )
                ]
            elif prop.type in (
                    UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
            ):
                continue
            user_data[key] = value
        return UserInfo(**user_data)

    async def async_generate_user_info(self, user: UserWithRoles, scope: str):
        user_data = {
            'roles': user.roles,
        }
        for key, prop, group_type, group_by_name in self._translate_properties(scope, await async_read_schema()):
            if not hasattr(user.user, prop.key):
                continue
            value = getattr(user.user, prop.key, None)
            if prop.type == UserPropertyType.picture:
                if value is not None:
                    value = f"{config.oauth2.base_url}/picture/{value}"
            elif prop.type == UserPropertyType.groups:
                group_filter = {} if group_type is None else {'group_type': group_type}
                value = [
                    group['group_name'] if group_by_name else group['_id']
                    async for group in async_user_group_collection.find(
                        {'_id': {'$in': value}, 'visible': True, **group_filter},
                        projection={'group_name' if group_by_name else '_id': 1}
                    )
                ]
            elif prop.type in (
                    UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
            ):
                continue
            user_data[key] = value
        return UserInfo(**user_data)


class AuthorizationCodeGrant(_AuthorizationCodeGrant):
    TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic', 'client_secret_post']
    AUTHORIZATION_CODE_LENGTH = config.oauth2.authorization_code_length

    def save_authorization_code(self, code: str, request: TypedRequest):
        return save_authorization_code(code, request)

    def query_authorization_code(self, code: str, client: DbClient):
        auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id})
        if auth_code_data is None:
            return None
        auth_code = DbAuthorizationCode.validate_document(auth_code_data)
        if auth_code.is_expired():
            return None
        return auth_code

    def delete_authorization_code(self, authorization_code: DbAuthorizationCode):
        authorization_code_collection.delete_one({'_id': authorization_code.code})

    def authenticate_user(self, authorization_code: DbAuthorizationCode):
        return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id)


class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant):
    def authenticate_token_endpoint_client(self):
        # Must override this to set the client in the request, to make it available to authenticate_user
        client = super().authenticate_token_endpoint_client()
        self.request.client = client
        return client

    def authenticate_user(self, username: str, password: str):
        user_data = user_collection.find_one({'email': username, 'access_tokens.token': password, 'active': True})
        if user_data is None:
            return None
        return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id)


class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode):
    jwt_token_expiration = config.oauth2.token_expiration.authorization_code


class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant):
    jwt_token_expiration = config.oauth2.token_expiration.implicit


class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant):
    jwt_token_expiration = config.oauth2.token_expiration.implicit

    def generate_authorization_code(self) -> str:
        return generate_token(config.oauth2.authorization_code_length)

    def save_authorization_code(self, code: str, request: TypedRequest):
        return save_authorization_code(code, request)


class RefreshTokenGrant(_RefreshTokenGrant):
    TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic']
    INCLUDE_NEW_REFRESH_TOKEN = True

    def authenticate_refresh_token(self, refresh_token: str):
        token_data = token_collection.find_one({'refresh_token': refresh_token})
        if token_data is None:
            return None
        auth_code = DbToken.validate_document(token_data)
        if auth_code.is_expired():
            return None
        return auth_code

    def authenticate_user(self, credential: DbToken):
        return UserWithRoles.load(credential.user_id, credential.client_id)

    def revoke_old_credential(self, credential: DbToken):
        # token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
        token_collection.delete_one({'_id': credential.access_token})


def save_token(token: Dict[str, Any], request: TypedRequest):
    if request.user:
        user_id = request.user.user.id
    else:
        user_id = None
    now = int(time.time())
    token_data = DbToken.validate_document({
        'client_id': request.client.id,
        'user_id': user_id,
        'issued_at': now,
        'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)),
        'scope': request.scope,
        'auth_time': request.credential.get_auth_time(),
        **token
    })
    token_collection.insert_one(token_data.document())
    return token_data


def query_client(client_id: str):
    client_data = client_collection.find_one({'_id': client_id})
    if client_data is None:
        return None
    return DbClient.validate_document(client_data)


async def async_query_client(client_id: str):
    client_data = await async_client_collection.find_one({'_id': client_id})
    if client_data is None:
        return None
    return DbClient.validate_document(client_data)


def token_generator(*_):
    return generate_token(config.oauth2.token_length)


class AccessTokenGenerator(UserInfoMixin, JwtConfigMixin):
    jwt_token_expiration = config.oauth2.token_expiration.authorization_code

    def __call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope: str):
        jwt_config = self.get_jwt_config()
        jwt_config['aud'] = [client.get_client_id()]
        jwt_config['auth_time'] = int(time.time())

        user_info = {'sub': user.user.id, 'roles': user.roles}
        if 'groups' in scope_to_list(scope):
            user_info['groups'] = user.user.groups
        return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config)


def token_expires_in(_, grant_type: str):
    return getattr(config.oauth2.token_expiration, grant_type)


class BearerToken(_BearerToken):
    def __call__(self, client, grant_type, user=None, scope=None, expires_in=None, include_refresh_token=True):
        if 'offline_access' not in scope_to_list(scope):
            include_refresh_token = False
        return super(BearerToken, self).__call__(client, grant_type, user, scope, expires_in, include_refresh_token)


authorization = AuthorizationServer(
    query_client,
    save_token,
    BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator),
)


class OpenIDSessionState:
    def __call__(self, grant: BaseGrant):
        grant.register_hook('process_token', self.process_token)

    def process_token(self, grant: BaseGrant, token: dict):
        scope = token.get('scope')
        if not scope or not is_openid_scope(scope):
            # standard authorization code flow
            return token
        token['session_state'] = str(grant.request.user.last_modified)
        return token


# support all openid grants
authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(OpenIDImplicitGrant)
authorization.register_grant(OpenIDHybridGrant)
authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(ResourceOwnerPasswordCredentialsGrant)


class BearerTokenValidator(_BearerTokenValidator):
    def authenticate_token(self, token_string: str):
        token_data = token_collection.find_one({'_id': token_string})
        if token_data is None:
            return None
        token = DbToken.validate_document(token_data)
        if client_user_cache_collection.count_documents({
            'client_id': token.client_id,
            'user_id': token.user_id,
        }) != 1:
            return None
        return token

    def request_invalid(self, request: TypedRequest):
        return False

    def token_revoked(self, token: DbToken):
        return token.revoked


class ResourceProtector(_ResourceProtector):
    def validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken:
        assert isinstance(request, OAuth2Request)
        return self.validate_request(scope, request, scope_operator)


class UserIntrospection(UserInfoMixin):
    async def create_response(self, request: TypedRequest) -> Response:
        try:
            assert isinstance(request, OAuth2Request)
            request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
            if request.token is None:
                raise HTTPException(403, "Invalid token")
            request.user = await UserWithRoles.async_load(request.token.user_id, request.token.client_id)
            user_info = await self.async_generate_user_info(request.user, request.token.scope)
            return JSONResponse(user_info)
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class RequestOriginVerifier:
    async def create_response(self, request: TypedRequest, origin: str) -> Optional[Response]:
        try:
            assert isinstance(request, OAuth2Request)
            request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
            if request.token is None:
                raise HTTPException(403, "Invalid token")
            request.client = await async_query_client(request.token.client_id)
            if request.client is None:
                raise HTTPException(403, "Invalid client in token")
            if not request.client.check_redirect_uri(origin):
                raise HTTPException(403, "Allowed redirect uri does not match request")
            return None
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class TypeHint(str, Enum):
    AccessToken = "access_token"
    RefreshToken = "refresh_token"


class RevocationEndpoint:
    async def create_response(
            self, raw_token: str, token_type_hint: Optional[TypeHint], request: TypedRequest
    ) -> Response:
        token_data = None
        if token_type_hint is None or token_type_hint == TypeHint.AccessToken:
            token_data = await async_token_collection.find_one({'_id': raw_token})
        if token_data is None and (token_type_hint is None or token_type_hint == TypeHint.RefreshToken):
            token_data = await async_token_collection.find_one({'refresh_token': raw_token})
        if token_data is None:
            return Response()
        token = DbToken.validate_document(token_data)
        try:
            if request.client_id is None:
                request.data['client_id'] = token.client_id
            elif token.client_id != request.client_id:
                raise InvalidClientError(state=request.state, status_code=401)
            await run_in_threadpool(
                authorization.authenticate_client, request, ["none", "client_secret_basic", "client_secret_post"]
            )
            # await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}})
            # token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
            await async_token_collection.delete_one({'_id': token.access_token})
            return Response()
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class OtherUserInspection(UserInfoMixin):
    async def create_response(self, request: TypedRequest, user_id: str, client_auth: dict = None) -> Response:
        try:
            assert isinstance(request, OAuth2Request)
            if request.client is None:
                request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
                if request.token is None:
                    raise HTTPException(403, "Invalid token")
                client_id = request.token.client_id
                scopes = request.token.scope
                scope = USERS_SCOPE
            else:
                client_id = request.client_id
                scopes = request.client.allowed_scope
                scope = scopes
            if USERS_SCOPE not in scope_to_list(scopes):
                raise InsufficientScopeError('Missing "*users" scope', request.uri)
            user = await UserWithRoles.async_load(user_id, client_id)
            if user is None:
                raise HTTPException(404, "User not found")
            user_info = await self.async_generate_user_info(user, scope)
            return JSONResponse(user_info)
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class OtherUsersInspection(UserInfoMixin):
    async def create_response(self, request: TypedRequest) -> Response:
        try:
            assert isinstance(request, OAuth2Request)
            if request.client is None:
                request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
                if request.token is None:
                    raise HTTPException(403, "Invalid token")
                client_id = request.token.client_id
                scopes = request.token.scope
                scope = USERS_SCOPE
                load_roles = False
            else:
                client_id = request.client_id
                scopes = request.client.allowed_scope
                scope = scopes
                load_roles = True
            if USERS_SCOPE not in scope_to_list(scopes):
                raise InsufficientScopeError('Missing "*users" scope', request.uri)
            user_infos = []
            for user in await UserWithRoles.async_load_all(client_id, load_roles=load_roles):
                user_info = await self.async_generate_user_info(user, scope)
                if not load_roles:
                    del user_info['roles']
                user_infos.append(user_info)
            return JSONResponse(user_infos)
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


resource_protector = ResourceProtector()
resource_protector.register_token_validator(BearerTokenValidator())
user_introspection = UserIntrospection()
token_revocation = RevocationEndpoint()
request_origin_verifier = RequestOriginVerifier()
other_user_inspection = OtherUserInspection()
other_users_inspection = OtherUsersInspection()
BaseGrant): grant.register_hook('process_token', self.process_token) def", "( OpenIDCode as _OpenIDCode, OpenIDImplicitGrant as _OpenIDImplicitGrant, OpenIDHybridGrant as _OpenIDHybridGrant,", "code flow return token token['session_state'] = str(grant.request.user.last_modified) return token #", "ExistsNonceMixin, JwtConfigMixin, _OpenIDCode): jwt_token_expiration = config.oauth2.token_expiration.authorization_code class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin,", "user_info['roles'] user_infos.append(user_info) return JSONResponse(user_infos) except OAuth2Error as error: return authorization.handle_error_response(request,", "Response() token = DbToken.validate_document(token_data) try: if request.client_id is None: request.data['client_id']", "= await async_client_collection.find_one({'_id': client_id}) if client_data is None: return None", "DbToken): # token_collection.update_one({'_id': credential.access_token}, {'revoked': True}) token_collection.delete_one({'_id': credential.access_token}) def save_token(token:", "jwt_token_expiration = config.oauth2.token_expiration.authorization_code def __call__(self, client: DbClient, grant_type: str, user:", "await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}}) # token_collection.update_one({'_id': credential.access_token}, {'revoked':", "self.client.id) class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode): jwt_token_expiration = config.oauth2.token_expiration.authorization_code class", "DbToken.validate_document({ 'client_id': request.client.id, 'user_id': user_id, 'issued_at': now, 'expiration_time': datetime.utcnow() +", "try: assert isinstance(request, OAuth2Request) if request.client is None: request.token =", "\"client_secret_post\"] ) # await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}}) #", "token_type_hint == TypeHint.AccessToken: token_data = await async_token_collection.find_one({'_id': raw_token}) if token_data", "authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \\ InsufficientScopeError", "group_type, group_by_name in self._translate_properties(scope, await async_read_schema()): if not hasattr(user.user, prop.key):", ") authorization_code_collection.insert_one(item.document()) return item class ExistsNonceMixin(object): def exists_nonce(self, nonce: str,", "in ('openid', 'offline_access') and scope_name in schema.scopes_by_key for prop in", "TypedRequest): nonce = request.data.get('nonce') item = DbAuthorizationCode( code=code, client_id=request.client.id, redirect_uri=request.redirect_uri,", "_AuthorizationCodeGrant, RefreshTokenGrant as _RefreshTokenGrant, BaseGrant, ) from authlib.oauth2.rfc6749.grants import (", "status_code=401, headers=dict(default_json_headers), ) class AuthorizationServer(_AuthorizationServer): metadata_class = AuthorizationServerMetadata def create_oauth2_request(self,", "None: request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token is", "= False return super(BearerToken, self).__call__(client, grant_type, user, scope, expires_in, include_refresh_token)", "body return ErrorRedirectResponse( status_code=status_code, headers=headers, ) assert False def save_authorization_code(code:", "authlib.oauth2.rfc6749.grants import ( AuthorizationCodeGrant as _AuthorizationCodeGrant, RefreshTokenGrant as _RefreshTokenGrant, BaseGrant,", "async def 
async_query_client(client_id: str): client_data = await async_client_collection.find_one({'_id': client_id}) if", "return auth_code def authenticate_user(self, credential: DbToken): return UserWithRoles.load(credential.user_id, credential.client_id) def", "None if token_type_hint is None or token_type_hint == TypeHint.AccessToken: token_data", "return token def request_invalid(self, request: TypedRequest): return False def token_revoked(self,", "to make it available to authenticate_user client = super(self).authenticate_token_endpoint_client() self.request.client", "save_authorization_code(code: str, request: TypedRequest): nonce = request.data.get('nonce') item = DbAuthorizationCode(", "not in ('openid', 'offline_access') and scope_name in schema.scopes_by_key for prop", "scope) if not load_roles: del user_info['roles'] user_infos.append(user_info) return JSONResponse(user_infos) except", "the request, to make it available to authenticate_user client =", "error: return authorization.handle_error_response(request, error) resource_protector = ResourceProtector() resource_protector.register_token_validator(BearerTokenValidator()) user_introspection =", "assert isinstance(request, OAuth2Request) request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if", "= authorization_code_collection.update_one( {'client_id': request.client_id, 'nonce': nonce}, {'$set': {'nonce': None}}, )", "authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(ResourceOwnerPasswordCredentialsGrant) class BearerTokenValidator(_BearerTokenValidator): def authenticate_token(self, token_string: str):", "else '_id': 1} ) ] elif prop.type in ( UserPropertyType.access_token,", "not body return ErrorRedirectResponse( status_code=status_code, headers=headers, ) assert False def", "DbAuthorizationCode): return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id) class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant): def authenticate_token_endpoint_client(self): # Must", "Optional, Tuple, Dict, Any, Union import time from authlib.common.security import", "user_collection.find_one({'email': username, 'access_tokens.token': password, 'active': True}) if user_data is None:", "TODO: Create HttpRequest with json in body. 
def handle_response(self, status_code:", "if mod_result.modified_count != 1: return False return True class JwtConfigMixin(object):", "BaseGrant): grant.register_hook('process_token', self.process_token) def process_token(self, grant: BaseGrant, token: dict): scope", "group in async_user_group_collection.find( {'_id': {'$in': value}, 'visible': True, **group_filter}, projection={'group_name'", "== UserPropertyType.groups: group_filter = {} if group_type is None else", "load_roles=load_roles): user_info = await self.async_generate_user_info(user, scope) if not load_roles: del", "token_string: str): token_data = token_collection.find_one({'_id': token_string}) if token_data is None:", "client_id = request.client_id scopes = request.client.allowed_scope scope = scopes if", "content=body, status_code=status_code, headers=headers, ) elif headers.get('Location'): assert not body return", "( ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant, ) from authlib.oauth2.rfc6749.util import scope_to_list from", "OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken: assert isinstance(request,", "False else: client_id = request.client_id scopes = request.client.allowed_scope scope =", "status_code=status_code, headers=headers, ) elif headers.get('Location'): assert not body return ErrorRedirectResponse(", "from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \\", "= request.client.allowed_scope scope = scopes load_roles = True if USERS_SCOPE", "continue value = getattr(user.user, prop.key, None) if prop.type == UserPropertyType.picture:", "-> Response: try: assert isinstance(request, OAuth2Request) if request.client is None:", "-> JSONResponse: return ErrorJSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=401, headers=dict(default_json_headers), ) class", "group_by_name else group['_id'] for group in user_group_collection.find( {'_id': {'$in': value},", "UserInfo from authlib.oidc.core.grants import ( OpenIDCode as _OpenIDCode, OpenIDImplicitGrant as", "headers.get('Location'): assert not payload return RedirectResponse(status_code=status_code, headers=headers) assert False def", "body. 
def handle_response(self, status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]):", "from authlib.oauth2.rfc6749.grants import ( AuthorizationCodeGrant as _AuthorizationCodeGrant, RefreshTokenGrant as _RefreshTokenGrant,", "user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config) def token_expires_in(_, grant_type: str): return getattr(config.oauth2.token_expiration, grant_type)", "AccessTokenGenerator(UserInfoMixin, JwtConfigMixin): jwt_token_expiration = config.oauth2.token_expiration.authorization_code def __call__(self, client: DbClient, grant_type:", "code: str, client: DbClient): auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id})", "OpenIDHybridGrant as _OpenIDHybridGrant, ) from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token from", "to_json_response(self) -> JSONResponse: return ErrorJSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=401, headers=dict(default_json_headers), )", "authorization_code_collection.delete_one({'_id': authorization_code.code}) def authenticate_user(self, authorization_code: DbAuthorizationCode): return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id) class", "default_json_headers from authlib.oauth2 import ( OAuth2Request, AuthorizationServer as _AuthorizationServer, ResourceProtector", "not in scope_to_list(scope): include_refresh_token = False return super(BearerToken, self).__call__(client, grant_type,", "is None: request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token", ") from authlib.oauth2.rfc6749.grants import ( ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant, ) from", "validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken:", "token_collection.update_one({'_id': credential.access_token}, {'revoked': True}) await async_token_collection.delete_one({'_id': token.access_token}) return Response() except", "isinstance(body, dict): return ErrorJSONResponse( content=body, status_code=status_code, headers=headers, ) elif headers.get('Location'):", "[ group['group_name'] if group_by_name else group['_id'] async for group in", "return item class ExistsNonceMixin(object): def exists_nonce(self, nonce: str, request: TypedRequest):", "__call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope: str): jwt_config", "client_data is None: return None return DbClient.validate_document(client_data) def token_generator(*_): return", "def authenticate_user(self, authorization_code: DbAuthorizationCode): return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id) class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant): def", "in self._translate_properties(scope, await async_read_schema()): if not hasattr(user.user, prop.key): continue value", "content={'redirect_uri': self.headers['Location']}, status_code=200, headers=dict(default_json_headers), ) class ErrorJSONResponse(JSONResponse): pass class ErrorRedirectResponse(RedirectResponse):", "class UserIntrospection(UserInfoMixin): async def create_response(self, request: TypedRequest) -> Response: try:", "'client_id': token.client_id, 'user_id': token.user_id, }) != 1: return None return", "return DbClient.validate_document(client_data) def token_generator(*_): return generate_token(config.oauth2.token_length) class AccessTokenGenerator(UserInfoMixin, JwtConfigMixin): 
jwt_token_expiration", "List, Optional, Tuple, Dict, Any, Union import time from authlib.common.security", "scopes = request.client.allowed_scope scope = scopes if USERS_SCOPE not in", "request.client.allowed_scope scope = scopes if USERS_SCOPE not in scope_to_list(scopes): raise", "async for group in async_user_group_collection.find( {'_id': {'$in': value}, 'visible': True,", "return UserInfo(**user_data) class AuthorizationCodeGrant(_AuthorizationCodeGrant): TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic', 'client_secret_post'] AUTHORIZATION_CODE_LENGTH", "UserWithRoles.load(credential.user_id, credential.client_id) def revoke_old_credential(self, credential: DbToken): # token_collection.update_one({'_id': credential.access_token}, {'revoked':", "OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode): jwt_token_expiration = config.oauth2.token_expiration.authorization_code class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin,", "self.get_jwt_config() jwt_config['aud'] = [client.get_client_id()] jwt_config['auth_time'] = int(time.time()) user_info = {'sub':", "request.client = await async_query_client(request.token.client_id) if request.client is None: raise HTTPException(403,", "raise HTTPException(403, \"Invalid client in token\") if not request.client.check_redirect_uri(origin): raise", "client_id = request.token.client_id scopes = request.token.scope scope = USERS_SCOPE else:", "= request.data.get('nonce') item = DbAuthorizationCode( code=code, client_id=request.client.id, redirect_uri=request.redirect_uri, scope=request.scope, user_id=request.user.user.id,", "create_response(self, request: TypedRequest) -> Response: try: assert isinstance(request, OAuth2Request) if", "ErrorRedirectResponse( status_code=status_code, headers=headers, ) assert False def save_authorization_code(code: str, request:", "credential.access_token}) def save_token(token: Dict[str, Any], request: TypedRequest): if request.user: user_id", "def validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') ->", "_RefreshTokenGrant, BaseGrant, ) from authlib.oauth2.rfc6749.grants import ( ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant,", "{'group_type': group_type} value = [ group['group_name'] if group_by_name else group['_id']", "for user in await UserWithRoles.async_load_all(client_id, load_roles=load_roles): user_info = await self.async_generate_user_info(user,", "request.token.scope) return JSONResponse(user_info) except OAuth2Error as error: return authorization.handle_error_response(request, error)", "jwt_config = self.get_jwt_config() jwt_config['aud'] = [client.get_client_id()] jwt_config['auth_time'] = int(time.time()) user_info", "credential: DbToken): return UserWithRoles.load(credential.user_id, credential.client_id) def revoke_old_credential(self, credential: DbToken): #", "DbToken): return token.revoked class ResourceProtector(_ResourceProtector): def validate(self, request: OAuth2Request, scope:", "return getattr(config.oauth2.token_expiration, grant_type) class BearerToken(_BearerToken): def __call__(self, client, grant_type, user=None,", "save_authorization_code(code, request) def query_authorization_code(self, code: str, client: DbClient): auth_code_data =", "ErrorRedirectResponse(RedirectResponse): def to_json_response(self) -> JSONResponse: return ErrorJSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=401,", ") assert False def save_authorization_code(code: str, request: 
TypedRequest): nonce =", "UserPropertyType.picture: if value is not None: value = f\"{config.oauth2.base_url}/picture/{value}\" elif", "= authorization_code_collection.find_one({'_id': code, 'client_id': client.id}) if auth_code_data is None: return", "resource_protector.register_token_validator(BearerTokenValidator()) user_introspection = UserIntrospection() token_revocation = RevocationEndpoint() request_origin_verifier = RequestOriginVerifier()", "OAuth2Request) if request.client is None: request.token = await run_in_threadpool(resource_protector.validate_request, None,", "async_read_schema from . import oauth2_key from .user_helper import UserWithRoles USERS_SCOPE", "client: DbClient): auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id}) if auth_code_data", "import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \\ UserPropertyType from", "return None token = DbToken.validate_document(token_data) if client_user_cache_collection.count_documents({ 'client_id': token.client_id, 'user_id':", "request.user.user.id else: user_id = None now = int(time.time()) token_data =", "JSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=200, headers=dict(default_json_headers), ) class ErrorJSONResponse(JSONResponse): pass class", "nonce: str, request: TypedRequest): # exists = mongo.authorization_code_collection.count_documents( # {'client_id':", "RefreshTokenGrant as _RefreshTokenGrant, BaseGrant, ) from authlib.oauth2.rfc6749.grants import ( ResourceOwnerPasswordCredentialsGrant", "request.user = await UserWithRoles.async_load(request.token.user_id, request.token.client_id) user_info = await self.async_generate_user_info(request.user, request.token.scope)", "[OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(OpenIDImplicitGrant) authorization.register_grant(OpenIDHybridGrant) authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(ResourceOwnerPasswordCredentialsGrant) class BearerTokenValidator(_BearerTokenValidator):", "TypedRequest, user_id: str, client_auth: dict = None) -> Response: try:", "save_authorization_code(code, request) class RefreshTokenGrant(_RefreshTokenGrant): TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic'] INCLUDE_NEW_REFRESH_TOKEN =", "and scope_name in schema.scopes_by_key for prop in schema.scopes_by_key[scope_name].properties if prop.user_property", "authorization.handle_error_response(request, error) class TypeHint(str, Enum): AccessToken = \"access_token\" RefreshToken =", "= user.user.groups return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config) def token_expires_in(_, grant_type:", "token_type_hint == TypeHint.RefreshToken): token_data = await async_token_collection.find_one({'refresh_token': raw_token}) if token_data", "authorization_code_collection.find_one({'_id': code, 'client_id': client.id}) if auth_code_data is None: return None", "jwt_token_expiration = config.oauth2.token_expiration.implicit class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant): jwt_token_expiration =", ") if mod_result.modified_count != 1: return False return True class", "return authorization.handle_error_response(request, error) class TypeHint(str, Enum): AccessToken = \"access_token\" RefreshToken", "try: if request.client_id is None: request.data['client_id'] = token.client_id elif token.client_id", "username, 
'access_tokens.token': password, 'active': True}) if user_data is None: return", "auth_code.is_expired(): return None return auth_code def delete_authorization_code(self, authorization_code: DbAuthorizationCode): authorization_code_collection.delete_one({'_id':", "'client_secret_post'] AUTHORIZATION_CODE_LENGTH = config.oauth2.authorization_code_length def save_authorization_code(self, code: str, request: TypedRequest):", "return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id) class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant): def authenticate_token_endpoint_client(self): # Must override", "or not is_openid_scope(scope): # standard authorization code flow return token", "as _OpenIDCode, OpenIDImplicitGrant as _OpenIDImplicitGrant, OpenIDHybridGrant as _OpenIDHybridGrant, ) from", "BearerTokenValidator(_BearerTokenValidator): def authenticate_token(self, token_string: str): token_data = token_collection.find_one({'_id': token_string}) if", "authorization_code.code}) def authenticate_user(self, authorization_code: DbAuthorizationCode): return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id) class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant):", "from user_manager.common.config import config from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient,", "to set the client in the request, to make it", "str): return getattr(config.oauth2.token_expiration, grant_type) class BearerToken(_BearerToken): def __call__(self, client, grant_type,", "code=generate_token(config.oauth2.access_token_length), **jwt_config) def token_expires_in(_, grant_type: str): return getattr(config.oauth2.token_expiration, grant_type) class", "OAuth2Error as error: return authorization.handle_error_response(request, error) resource_protector = ResourceProtector() resource_protector.register_token_validator(BearerTokenValidator())", "\"User not found\") user_info = await self.async_generate_user_info(user, scope) return JSONResponse(user_info)", "AuthorizationServerMetadata from authlib.oidc.core import UserInfo from authlib.oidc.core.grants import ( OpenIDCode", "request.client is None: raise HTTPException(403, \"Invalid client in token\") if", "scope_to_list(scope): user_info['groups'] = user.user.groups return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config) def", "import AuthorizationServerMetadata from authlib.oidc.core import UserInfo from authlib.oidc.core.grants import (", "'active': True}) if user_data is None: return None return UserWithRoles.load_groups(DbUser.validate_document(user_data),", "scope or not is_openid_scope(scope): # standard authorization code flow return", "def exists_nonce(self, nonce: str, request: TypedRequest): # exists = mongo.authorization_code_collection.count_documents(", "OpenIDSessionState()]) authorization.register_grant(ResourceOwnerPasswordCredentialsGrant) class BearerTokenValidator(_BearerTokenValidator): def authenticate_token(self, token_string: str): token_data =", "as _ResourceProtector, OAuth2Error, HttpRequest, ) from authlib.oauth2.rfc6749 import InvalidClientError from", "generate_token from authlib.consts import default_json_headers from authlib.oauth2 import ( OAuth2Request,", "**token }) token_collection.insert_one(token_data.document()) return token_data def query_client(client_id: str): client_data =", "jwt_token_expiration: int def get_jwt_config(self, *args, **kwargs): 
return { 'key': oauth2_key.key.key,", "auth_code.is_expired(): return None return auth_code def authenticate_user(self, credential: DbToken): return", "async_client_collection, user_collection, read_schema, async_read_schema from . import oauth2_key from .user_helper", "if not scope or not is_openid_scope(scope): # standard authorization code", "] def generate_user_info(self, user: UserWithRoles, scope: str): user_data = {", "user_manager.common.config import config from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser,", "request.client.id, 'user_id': user_id, 'issued_at': now, 'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)),", "-> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]: scope_list = ['*'] + scope_to_list(scope)", "'alg': oauth2_key.key.jwk.alg.value, 'iss': config.oauth2.issuer, 'exp': self.jwt_token_expiration, } class UserInfoMixin(object): def", "credential.access_token}, {'revoked': True}) await async_token_collection.delete_one({'_id': token.access_token}) return Response() except OAuth2Error", "__call__(self, client, grant_type, user=None, scope=None, expires_in=None, include_refresh_token=True): if 'offline_access' not", "request) def query_authorization_code(self, code: str, client: DbClient): auth_code_data = authorization_code_collection.find_one({'_id':", "group_type} value = [ group['group_name'] if group_by_name else group['_id'] async", "user_id = None now = int(time.time()) token_data = DbToken.validate_document({ 'client_id':", "def create_response(self, request: TypedRequest, origin: str) -> Optional[Response]: try: assert", "AuthorizationServer( query_client, save_token, BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator), ) class OpenIDSessionState: def", "None or token_type_hint == TypeHint.AccessToken: token_data = await async_token_collection.find_one({'_id': raw_token})", "authlib.oidc.core import UserInfo from authlib.oidc.core.grants import ( OpenIDCode as _OpenIDCode,", "None, request) if request.token is None: raise HTTPException(403, \"Invalid token\")", "RedirectResponse(Response): def to_json_response(self) -> JSONResponse: return JSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=200,", "in the request, to make it available to authenticate_user client", "InsufficientScopeError from authlib.oauth2.rfc8414 import AuthorizationServerMetadata from authlib.oidc.core import UserInfo from", "return save_authorization_code(code, request) def query_authorization_code(self, code: str, client: DbClient): auth_code_data", "if request.token is None: raise HTTPException(403, \"Invalid token\") client_id =", "in scope_list if scope_name not in ('openid', 'offline_access') and scope_name", "UserWithRoles, scope: str): user_data = { 'roles': user.roles, } for", "None: request.data['client_id'] = token.client_id elif token.client_id != request.client_id: raise InvalidClientError(state=request.state,", "AuthorizationServer(_AuthorizationServer): metadata_class = AuthorizationServerMetadata def create_oauth2_request(self, request: TypedRequest): assert isinstance(request,", "str, client_auth: dict = None) -> Response: try: assert isinstance(request,", "all openid grants authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(OpenIDImplicitGrant) authorization.register_grant(OpenIDHybridGrant) 
authorization.register_grant(RefreshTokenGrant, [OpenIDCode(),", "async def create_response(self, request: TypedRequest, user_id: str, client_auth: dict =", "prop.user_property in schema.properties_by_key ] def generate_user_info(self, user: UserWithRoles, scope: str):", "is None: raise HTTPException(404, \"User not found\") user_info = await", "user_introspection = UserIntrospection() token_revocation = RevocationEndpoint() request_origin_verifier = RequestOriginVerifier() other_user_inspection", "\"Allowed redirect uri does not match request\") return None except", "request.token.scope scope = USERS_SCOPE else: client_id = request.client_id scopes =", "= '*users' class TypedRequest(OAuth2Request): user: UserWithRoles credential: Union[DbAuthorizationCode, DbToken] client:", "HTTPException(403, \"Invalid token\") client_id = request.token.client_id scopes = request.token.scope scope", "raise HTTPException(403, \"Invalid token\") client_id = request.token.client_id scopes = request.token.scope", "return DbClient.validate_document(client_data) async def async_query_client(client_id: str): client_data = await async_client_collection.find_one({'_id':", "Optional[TypeHint], request: TypedRequest ) -> Response: token_data = None if", "# support all openid grants authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(OpenIDImplicitGrant) authorization.register_grant(OpenIDHybridGrant)", "token_data is None: return Response() token = DbToken.validate_document(token_data) try: if", "token_collection.insert_one(token_data.document()) return token_data def query_client(client_id: str): client_data = client_collection.find_one({'_id': client_id})", "True if USERS_SCOPE not in scope_to_list(scopes): raise InsufficientScopeError('Missing \"*users\" scope',", "\"client_secret_basic\", \"client_secret_post\"] ) # await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}})", "scope: str, schema: DbManagerSchema, ) -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]:", "is not None: value = f\"{config.oauth2.base_url}/picture/{value}\" elif prop.type == UserPropertyType.groups:", "+ scope_to_list(scope) return [ (prop.valid_key, schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name) for scope_name", "error: return authorization.handle_error_response(request, error) class RequestOriginVerifier: async def create_response(self, request:", "mongo.authorization_code_collection.count_documents( # {'client_id': request.client_id, 'nonce': nonce}, # limit=1, # )", "import oauth2_key from .user_helper import UserWithRoles USERS_SCOPE = '*users' class", "if token_data is None: return None auth_code = DbToken.validate_document(token_data) if", "group_by_name else '_id': 1} ) ] elif prop.type in (", "or token_type_hint == TypeHint.AccessToken: token_data = await async_token_collection.find_one({'_id': raw_token}) if", "if not request.client.check_redirect_uri(origin): raise HTTPException(403, \"Allowed redirect uri does not", "request.token is None: raise HTTPException(403, \"Invalid token\") client_id = request.token.client_id", "ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant, ) from authlib.oauth2.rfc6749.util import scope_to_list from authlib.oauth2.rfc6750", "authlib.common.security import generate_token from authlib.consts import default_json_headers from authlib.oauth2 import", "group in user_group_collection.find( {'_id': {'$in': 
value}, 'visible': True, **group_filter}, projection={'group_name'", "class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode): jwt_token_expiration = config.oauth2.token_expiration.authorization_code class OpenIDImplicitGrant(UserInfoMixin,", "def save_authorization_code(code: str, request: TypedRequest): nonce = request.data.get('nonce') item =", "grant: BaseGrant): grant.register_hook('process_token', self.process_token) def process_token(self, grant: BaseGrant, token: dict):", "self._translate_properties(scope, await async_read_schema()): if not hasattr(user.user, prop.key): continue value =", "'client_id': client.id}) if auth_code_data is None: return None auth_code =", "from authlib.oidc.core.grants import ( OpenIDCode as _OpenIDCode, OpenIDImplicitGrant as _OpenIDImplicitGrant,", "from authlib.oauth2.rfc6749 import InvalidClientError from authlib.oauth2.rfc6749.grants import ( AuthorizationCodeGrant as", "async_generate_user_info(self, user: UserWithRoles, scope: str): user_data = { 'roles': user.roles,", "return RedirectResponse(status_code=status_code, headers=headers) assert False def handle_error_response(self, request: TypedRequest, error:", "raise HTTPException(403, \"Invalid token\") request.client = await async_query_client(request.token.client_id) if request.client", "str, password: str): user_data = user_collection.find_one({'email': username, 'access_tokens.token': password, 'active':", "None or token_type_hint == TypeHint.RefreshToken): token_data = await async_token_collection.find_one({'refresh_token': raw_token})", "metadata_class = AuthorizationServerMetadata def create_oauth2_request(self, request: TypedRequest): assert isinstance(request, OAuth2Request)", "class JwtConfigMixin(object): jwt_token_expiration: int def get_jwt_config(self, *args, **kwargs): return {", "= True if USERS_SCOPE not in scope_to_list(scopes): raise InsufficientScopeError('Missing \"*users\"", "import default_json_headers from authlib.oauth2 import ( OAuth2Request, AuthorizationServer as _AuthorizationServer,", "grant_type) class BearerToken(_BearerToken): def __call__(self, client, grant_type, user=None, scope=None, expires_in=None,", "TypedRequest): return save_authorization_code(code, request) def query_authorization_code(self, code: str, client: DbClient):", "if prop.user_property in schema.properties_by_key ] def generate_user_info(self, user: UserWithRoles, scope:", "user_data is None: return None return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id) class OpenIDCode(UserInfoMixin,", "user_id, 'issued_at': now, 'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)), 'scope': request.scope,", "None: return None auth_code = DbAuthorizationCode.validate_document(auth_code_data) if auth_code.is_expired(): return None", "int(time.time()) token_data = DbToken.validate_document({ 'client_id': request.client.id, 'user_id': user_id, 'issued_at': now,", "): continue user_data[key] = value return UserInfo(**user_data) async def async_generate_user_info(self,", "and (token_type_hint is None or token_type_hint == TypeHint.RefreshToken): token_data =", "password, 'active': True}) if user_data is None: return None return", "(token_type_hint is None or token_type_hint == TypeHint.RefreshToken): token_data = await", "authorization.register_grant(OpenIDHybridGrant) authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()]) authorization.register_grant(ResourceOwnerPasswordCredentialsGrant) class 
BearerTokenValidator(_BearerTokenValidator): def authenticate_token(self, token_string:", "token: dict): scope = token.get('scope') if not scope or not", "'nonce': nonce}, {'$set': {'nonce': None}}, ) if mod_result.modified_count != 1:", "from authlib.consts import default_json_headers from authlib.oauth2 import ( OAuth2Request, AuthorizationServer", "token_collection.find_one({'_id': token_string}) if token_data is None: return None token =", "JwtConfigMixin, _OpenIDHybridGrant): jwt_token_expiration = config.oauth2.token_expiration.implicit def generate_authorization_code(self) -> str: return", "None: raise HTTPException(403, \"Invalid token\") request.client = await async_query_client(request.token.client_id) if", "import HTTPException from starlette.concurrency import run_in_threadpool from starlette.responses import Response,", "_OpenIDHybridGrant, ) from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token from fastapi import", "'issued_at': now, 'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)), 'scope': request.scope, 'auth_time':", "nonce}, # limit=1, # ) mod_result = authorization_code_collection.update_one( {'client_id': request.client_id,", "oauth2_key.key.jwk.alg.value, 'iss': config.oauth2.issuer, 'exp': self.jwt_token_expiration, } class UserInfoMixin(object): def _translate_properties(", "import List, Optional, Tuple, Dict, Any, Union import time from", "class BearerTokenValidator(_BearerTokenValidator): def authenticate_token(self, token_string: str): token_data = token_collection.find_one({'_id': token_string})", "= None) -> Response: try: assert isinstance(request, OAuth2Request) if request.client", "in schema.properties_by_key ] def generate_user_info(self, user: UserWithRoles, scope: str): user_data", "= {'sub': user.user.id, 'roles': user.roles} if 'groups' in scope_to_list(scope): user_info['groups']", "{'nonce': None}}, ) if mod_result.modified_count != 1: return False return", "override this to set the client in the request, to", "import authorization_code_collection, token_collection, \\ client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \\ async_user_group_collection,", "user_info = await self.async_generate_user_info(user, scope) return JSONResponse(user_info) except OAuth2Error as", "str, request: TypedRequest): return save_authorization_code(code, request) def query_authorization_code(self, code: str,", "generate_token(config.oauth2.authorization_code_length) def save_authorization_code(self, code: str, request: TypedRequest): return save_authorization_code(code, request)", "run_in_threadpool from starlette.responses import Response, JSONResponse from user_manager.common.config import config", "client = super(self).authenticate_token_endpoint_client() self.request.client = client return client def authenticate_user(self,", "user_id: str, client_auth: dict = None) -> Response: try: assert", "InsufficientScopeError('Missing \"*users\" scope', request.uri) user_infos = [] for user in", "UserWithRoles.async_load_all(client_id, load_roles=load_roles): user_info = await self.async_generate_user_info(user, scope) if not load_roles:", "from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token from fastapi import HTTPException from", "HTTPException(403, \"Invalid token\") request.client = await async_query_client(request.token.client_id) if request.client is", "request: TypedRequest, error: OAuth2Error): status_code, body, headers = error( 
translations=self.get_translations(request),", "token.access_token}) return Response() except OAuth2Error as error: return authorization.handle_error_response(request, error)", "async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}}) # token_collection.update_one({'_id': credential.access_token}, {'revoked': True})", "OAuth2Error as error: return authorization.handle_error_response(request, error) class TypeHint(str, Enum): AccessToken", "if request.token is None: raise HTTPException(403, \"Invalid token\") request.user =", "is None: raise HTTPException(403, \"Invalid client in token\") if not", "try: assert isinstance(request, OAuth2Request) request.token = await run_in_threadpool(resource_protector.validate_request, None, request)", "class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant): jwt_token_expiration = config.oauth2.token_expiration.implicit def generate_authorization_code(self)", "expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code), ) authorization_code_collection.insert_one(item.document()) return item class ExistsNonceMixin(object): def", "None: raise HTTPException(403, \"Invalid client in token\") if not request.client.check_redirect_uri(origin):", "\\ UserPropertyType from user_manager.common.mongo import authorization_code_collection, token_collection, \\ client_collection, client_user_cache_collection,", "**jwt_config) def token_expires_in(_, grant_type: str): return getattr(config.oauth2.token_expiration, grant_type) class BearerToken(_BearerToken):", "ResourceProtector as _ResourceProtector, OAuth2Error, HttpRequest, ) from authlib.oauth2.rfc6749 import InvalidClientError", "if not hasattr(user.user, prop.key): continue value = getattr(user.user, prop.key, None)", "request_invalid(self, request: TypedRequest): return False def token_revoked(self, token: DbToken): return", "now, 'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)), 'scope': request.scope, 'auth_time': request.credential.get_auth_time(),", "token = DbToken.validate_document(token_data) if client_user_cache_collection.count_documents({ 'client_id': token.client_id, 'user_id': token.user_id, })", "= request.user.user.id else: user_id = None now = int(time.time()) token_data", "!= request.client_id: raise InvalidClientError(state=request.state, status_code=401) await run_in_threadpool( authorization.authenticate_client, request, [\"none\",", "datetime import datetime, timedelta from enum import Enum from typing", "if request.token is None: raise HTTPException(403, \"Invalid token\") request.client =", "client_id = request.client_id scopes = request.client.allowed_scope scope = scopes load_roles", "with json in body. 
def handle_response(self, status_code: int, payload: Optional[dict],", "from authlib.common.security import generate_token from authlib.consts import default_json_headers from authlib.oauth2", "prop in schema.scopes_by_key[scope_name].properties if prop.user_property in schema.properties_by_key ] def generate_user_info(self,", "return JSONResponse(user_info) except OAuth2Error as error: return authorization.handle_error_response(request, error) class", "headers = dict(headers) if isinstance(body, dict): return ErrorJSONResponse( content=body, status_code=status_code,", "if auth_code.is_expired(): return None return auth_code def authenticate_user(self, credential: DbToken):", "True}) token_collection.delete_one({'_id': credential.access_token}) def save_token(token: Dict[str, Any], request: TypedRequest): if", "in token\") if not request.client.check_redirect_uri(origin): raise HTTPException(403, \"Allowed redirect uri", "request: TypedRequest): return save_authorization_code(code, request) class RefreshTokenGrant(_RefreshTokenGrant): TOKEN_ENDPOINT_AUTH_METHODS = ['none',", "request.data.get('nonce') item = DbAuthorizationCode( code=code, client_id=request.client.id, redirect_uri=request.redirect_uri, scope=request.scope, user_id=request.user.user.id, nonce=nonce,", "str, request: TypedRequest): return save_authorization_code(code, request) class RefreshTokenGrant(_RefreshTokenGrant): TOKEN_ENDPOINT_AUTH_METHODS =", "return self.validate_request(scope, request, scope_operator) class UserIntrospection(UserInfoMixin): async def create_response(self, request:", "= client return client def authenticate_user(self, username: str, password: str):", "config from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty,", "timedelta(seconds=config.oauth2.token_expiration.authorization_code), ) authorization_code_collection.insert_one(item.document()) return item class ExistsNonceMixin(object): def exists_nonce(self, nonce:", "request.token = await run_in_threadpool(resource_protector.validate_request, None, request) if request.token is None:", "import ( OAuth2Request, AuthorizationServer as _AuthorizationServer, ResourceProtector as _ResourceProtector, OAuth2Error,", "str): token_data = token_collection.find_one({'_id': token_string}) if token_data is None: return", "error: OAuth2Error): status_code, body, headers = error( translations=self.get_translations(request), error_uris=self.get_error_uris(request) )", "raw_token}) if token_data is None: return Response() token = DbToken.validate_document(token_data)", "+ timedelta(seconds=config.oauth2.token_expiration.authorization_code), ) authorization_code_collection.insert_one(item.document()) return item class ExistsNonceMixin(object): def exists_nonce(self,", "def query_authorization_code(self, code: str, client: DbClient): auth_code_data = authorization_code_collection.find_one({'_id': code,", "def handle_error_response(self, request: TypedRequest, error: OAuth2Error): status_code, body, headers =", "True}) if user_data is None: return None return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id)", "if request.client_id is None: request.data['client_id'] = token.client_id elif token.client_id !=", "None return DbClient.validate_document(client_data) async def async_query_client(client_id: str): client_data = await", "JwtConfigMixin, _OpenIDCode): jwt_token_expiration = config.oauth2.token_expiration.authorization_code class OpenIDImplicitGrant(UserInfoMixin, 
ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant):", "class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant): jwt_token_expiration = config.oauth2.token_expiration.implicit class OpenIDHybridGrant(UserInfoMixin,", "JSONResponse: return ErrorJSONResponse( content={'redirect_uri': self.headers['Location']}, status_code=401, headers=dict(default_json_headers), ) class AuthorizationServer(_AuthorizationServer):", "str): token_data = token_collection.find_one({'refresh_token': refresh_token}) if token_data is None: return", "content={'redirect_uri': self.headers['Location']}, status_code=401, headers=dict(default_json_headers), ) class AuthorizationServer(_AuthorizationServer): metadata_class = AuthorizationServerMetadata", "not match request\") return None except OAuth2Error as error: return", "nonce}, {'$set': {'nonce': None}}, ) if mod_result.modified_count != 1: return", "_BearerToken, \\ InsufficientScopeError from authlib.oauth2.rfc8414 import AuthorizationServerMetadata from authlib.oidc.core import", "if 'groups' in scope_to_list(scope): user_info['groups'] = user.user.groups return generate_id_token({}, user_info,", "TypeHint.RefreshToken): token_data = await async_token_collection.find_one({'refresh_token': raw_token}) if token_data is None:", "request.client_id is None: request.data['client_id'] = token.client_id elif token.client_id != request.client_id:", "include_refresh_token) authorization = AuthorizationServer( query_client, save_token, BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator), )", "Enum): AccessToken = \"access_token\" RefreshToken = \"refresh_token\" class RevocationEndpoint: async", ") -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]: scope_list = ['*'] +", "token\") request.client = await async_query_client(request.token.client_id) if request.client is None: raise", "client_id=request.client.id, redirect_uri=request.redirect_uri, scope=request.scope, user_id=request.user.user.id, nonce=nonce, auth_time=int(time.time()), expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code), )", "str): user_data = { 'roles': user.roles, } for key, prop,", "def async_generate_user_info(self, user: UserWithRoles, scope: str): user_data = { 'roles':", "request.client.check_redirect_uri(origin): raise HTTPException(403, \"Allowed redirect uri does not match request\")", "HttpRequest, ) from authlib.oauth2.rfc6749 import InvalidClientError from authlib.oauth2.rfc6749.grants import (", "token.client_id elif token.client_id != request.client_id: raise InvalidClientError(state=request.state, status_code=401) await run_in_threadpool(", "DbToken: assert isinstance(request, OAuth2Request) return self.validate_request(scope, request, scope_operator) class UserIntrospection(UserInfoMixin):", "if client_user_cache_collection.count_documents({ 'client_id': token.client_id, 'user_id': token.user_id, }) != 1: return", "1} ) ] elif prop.type in ( UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token", "= RevocationEndpoint() request_origin_verifier = RequestOriginVerifier() other_user_inspection = OtherUserInspection() other_users_inspection =", "expires_in=None, include_refresh_token=True): if 'offline_access' not in scope_to_list(scope): include_refresh_token = False", "token_type_hint: Optional[TypeHint], request: TypedRequest ) -> Response: token_data = None", "request: TypedRequest): assert 
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional, Tuple, Dict, Any, Union

import time
from authlib.common.security import generate_token
from authlib.consts import default_json_headers
from authlib.oauth2 import (
    OAuth2Request,
    AuthorizationServer as _AuthorizationServer,
    ResourceProtector as _ResourceProtector,
    OAuth2Error,
    HttpRequest,
)
from authlib.oauth2.rfc6749 import InvalidClientError
from authlib.oauth2.rfc6749.grants import (
    AuthorizationCodeGrant as _AuthorizationCodeGrant,
    RefreshTokenGrant as _RefreshTokenGrant,
    BaseGrant,
)
from authlib.oauth2.rfc6749.grants import (
    ResourceOwnerPasswordCredentialsGrant as _ResourceOwnerPasswordCredentialsGrant,
)
from authlib.oauth2.rfc6749.util import scope_to_list
from authlib.oauth2.rfc6750 import BearerTokenValidator as _BearerTokenValidator, BearerToken as _BearerToken, \
    InsufficientScopeError
from authlib.oauth2.rfc8414 import AuthorizationServerMetadata
from authlib.oidc.core import UserInfo
from authlib.oidc.core.grants import (
    OpenIDCode as _OpenIDCode,
    OpenIDImplicitGrant as _OpenIDImplicitGrant,
    OpenIDHybridGrant as _OpenIDHybridGrant,
)
from authlib.oidc.core.grants.util import is_openid_scope, generate_id_token
from fastapi import HTTPException
from starlette.concurrency import run_in_threadpool
from starlette.responses import Response, JSONResponse

from user_manager.common.config import config
from user_manager.common.models import DbAuthorizationCode, DbToken, DbClient, DbUser, DbManagerSchema, DbUserProperty, \
    UserPropertyType
from user_manager.common.mongo import authorization_code_collection, token_collection, \
    client_collection, client_user_cache_collection, user_group_collection, async_token_collection, \
    async_user_group_collection, async_client_collection, user_collection, read_schema, async_read_schema
from . import oauth2_key
from .user_helper import UserWithRoles

USERS_SCOPE = '*users'


class TypedRequest(OAuth2Request):
    user: UserWithRoles
    credential: Union[DbAuthorizationCode, DbToken]
    client: DbClient


class RedirectResponse(Response):
    def to_json_response(self) -> JSONResponse:
        return JSONResponse(
            content={'redirect_uri': self.headers['Location']},
            status_code=200,
            headers=dict(default_json_headers),
        )


class ErrorJSONResponse(JSONResponse):
    pass


class ErrorRedirectResponse(RedirectResponse):
    def to_json_response(self) -> JSONResponse:
        return ErrorJSONResponse(
            content={'redirect_uri': self.headers['Location']},
            status_code=401,
            headers=dict(default_json_headers),
        )


class AuthorizationServer(_AuthorizationServer):
    metadata_class = AuthorizationServerMetadata

    def create_oauth2_request(self, request: TypedRequest):
        assert isinstance(request, OAuth2Request)
        return request

    def create_json_request(self, request):
        assert isinstance(request, HttpRequest)
        raise NotImplementedError()
        # TODO: Create HttpRequest with json in body.

    def handle_response(self, status_code: int, payload: Optional[dict], headers: List[Tuple[str, str]]):
        headers = dict(headers)
        if isinstance(payload, dict):
            return JSONResponse(payload, status_code=status_code, headers=headers)
        elif headers.get('Location'):
            assert not payload
            return RedirectResponse(status_code=status_code, headers=headers)
        assert False

    def handle_error_response(self, request: TypedRequest, error: OAuth2Error):
        status_code, body, headers = error(
            translations=self.get_translations(request),
            error_uris=self.get_error_uris(request)
        )
        headers = dict(headers)
        if isinstance(body, dict):
            return ErrorJSONResponse(
                content=body,
                status_code=status_code,
                headers=headers,
            )
        elif headers.get('Location'):
            assert not body
            return ErrorRedirectResponse(
                status_code=status_code,
                headers=headers,
            )
        assert False


def save_authorization_code(code: str, request: TypedRequest):
    nonce = request.data.get('nonce')
    item = DbAuthorizationCode(
        code=code,
        client_id=request.client.id,
        redirect_uri=request.redirect_uri,
        scope=request.scope,
        user_id=request.user.user.id,
        nonce=nonce,
        auth_time=int(time.time()),
        expiration_time=datetime.utcnow() + timedelta(seconds=config.oauth2.token_expiration.authorization_code),
    )
    authorization_code_collection.insert_one(item.document())
    return item


class ExistsNonceMixin(object):
    def exists_nonce(self, nonce: str, request: TypedRequest):
        # exists = mongo.authorization_code_collection.count_documents(
        #     {'client_id': request.client_id, 'nonce': nonce},
        #     limit=1,
        # )
        mod_result = authorization_code_collection.update_one(
            {'client_id': request.client_id, 'nonce': nonce},
            {'$set': {'nonce': None}},
        )
        if mod_result.modified_count != 1:
            return False
        return True


class JwtConfigMixin(object):
    jwt_token_expiration: int

    def get_jwt_config(self, *args, **kwargs):
        return {
            'key': oauth2_key.key.key,
            'alg': oauth2_key.key.jwk.alg.value,
            'iss': config.oauth2.issuer,
            'exp': self.jwt_token_expiration,
        }


class UserInfoMixin(object):
    def _translate_properties(
            self,
            scope: str,
            schema: DbManagerSchema,
    ) -> List[Tuple[str, DbUserProperty, Optional[str], Optional[bool]]]:
        scope_list = ['*'] + scope_to_list(scope)
        return [
            (prop.valid_key, schema.properties_by_key[prop.user_property], prop.group_type, prop.group_by_name)
            for scope_name in scope_list
            if scope_name not in ('openid', 'offline_access') and scope_name in ...
            for prop in ...
        ]

    def generate_user_info(self, user: UserWithRoles, scope: str):
        user_data = {
            'roles': user.roles,
        }
        for key, prop, group_type, group_by_name in self._translate_properties(scope, read_schema()):
            if not hasattr(user.user, prop.key):
                continue
            value = getattr(user.user, prop.key, None)
            if prop.type == UserPropertyType.picture:
                if value is not None:
                    value = f"{config.oauth2.base_url}/picture/{value}"
            elif prop.type == UserPropertyType.groups:
                group_filter = {} if group_type is None else {'group_type': group_type}
                value = [
                    group['group_name'] if group_by_name else group['_id']
                    for group in user_group_collection.find(
                        {'_id': {'$in': value}, 'visible': True, **group_filter},
                        projection={'group_name' if group_by_name else '_id': 1}
                    )
                ]
            elif prop.type in (
                UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
            ):
                continue
            user_data[key] = value
        return UserInfo(**user_data)

    async def async_generate_user_info(self, user: UserWithRoles, scope: str):
        user_data = {
            'roles': user.roles,
        }
        for key, prop, group_type, group_by_name in self._translate_properties(scope, await async_read_schema()):
            if not hasattr(user.user, prop.key):
                continue
            value = getattr(user.user, prop.key, None)
            if prop.type == UserPropertyType.picture:
                if value is not None:
                    value = f"{config.oauth2.base_url}/picture/{value}"
            elif prop.type == UserPropertyType.groups:
                group_filter = {} if group_type is None else {'group_type': group_type}
                value = [
                    group['group_name'] if group_by_name else group['_id']
                    async for group in async_user_group_collection.find(
                        {'_id': {'$in': value}, 'visible': True, **group_filter},
                        projection={'group_name' if group_by_name else '_id': 1}
                    )
                ]
            elif prop.type in (
                UserPropertyType.access_token, UserPropertyType.password, UserPropertyType.token
            ):
                continue
            user_data[key] = value
        return UserInfo(**user_data)


class AuthorizationCodeGrant(_AuthorizationCodeGrant):
    TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic', 'client_secret_post']
    AUTHORIZATION_CODE_LENGTH = config.oauth2.authorization_code_length

    def save_authorization_code(self, code: str, request: TypedRequest):
        return save_authorization_code(code, request)

    def query_authorization_code(self, code: str, client: DbClient):
        auth_code_data = authorization_code_collection.find_one({'_id': code, 'client_id': client.id})
        if auth_code_data is None:
            return None
        auth_code = DbAuthorizationCode.validate_document(auth_code_data)
        if auth_code.is_expired():
            return None
        return auth_code

    def delete_authorization_code(self, authorization_code: DbAuthorizationCode):
        authorization_code_collection.delete_one({'_id': authorization_code.code})

    def authenticate_user(self, authorization_code: DbAuthorizationCode):
        return UserWithRoles.load(authorization_code.user_id, authorization_code.client_id)


class ResourceOwnerPasswordCredentialsGrant(_ResourceOwnerPasswordCredentialsGrant):
    def authenticate_token_endpoint_client(self):
        # Must override this to set the client in the request, to make it available to authenticate_user
        client = super(self).authenticate_token_endpoint_client()
        self.request.client = client
        return client

    def authenticate_user(self, username: str, password: str):
        user_data = user_collection.find_one({'email': username, 'access_tokens.token': password, 'active': True})
        if user_data is None:
            return None
        return UserWithRoles.load_groups(DbUser.validate_document(user_data), self.client.id)


class OpenIDCode(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDCode):
    jwt_token_expiration = config.oauth2.token_expiration.authorization_code


class OpenIDImplicitGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDImplicitGrant):
    jwt_token_expiration = config.oauth2.token_expiration.implicit


class OpenIDHybridGrant(UserInfoMixin, ExistsNonceMixin, JwtConfigMixin, _OpenIDHybridGrant):
    jwt_token_expiration = config.oauth2.token_expiration.implicit

    def generate_authorization_code(self) -> str:
        return generate_token(config.oauth2.authorization_code_length)

    def save_authorization_code(self, code: str, request: TypedRequest):
        return save_authorization_code(code, request)


class RefreshTokenGrant(_RefreshTokenGrant):
    TOKEN_ENDPOINT_AUTH_METHODS = ['none', 'client_secret_basic']
    INCLUDE_NEW_REFRESH_TOKEN = True

    def authenticate_refresh_token(self, refresh_token: str):
        token_data = token_collection.find_one({'refresh_token': refresh_token})
        if token_data is None:
            return None
        auth_code = DbToken.validate_document(token_data)
        if auth_code.is_expired():
            return None
        return auth_code

    def authenticate_user(self, credential: DbToken):
        return UserWithRoles.load(credential.user_id, credential.client_id)

    def revoke_old_credential(self, credential: DbToken):
        # token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
        token_collection.delete_one({'_id': credential.access_token})


def save_token(token: Dict[str, Any], request: TypedRequest):
    if request.user:
        user_id = request.user.user.id
    else:
        user_id = None
    now = int(time.time())
    token_data = DbToken.validate_document({
        'client_id': request.client.id,
        'user_id': user_id,
        'issued_at': now,
        'expiration_time': datetime.utcnow() + timedelta(seconds=token.get('expires_in', 0)),
        'scope': request.scope,
        'auth_time': request.credential.get_auth_time(),
        **token
    })
    token_collection.insert_one(token_data.document())
    return token_data


def query_client(client_id: str):
    client_data = client_collection.find_one({'_id': client_id})
    if client_data is None:
        return None
    return DbClient.validate_document(client_data)


async def async_query_client(client_id: str):
    client_data = await async_client_collection.find_one({'_id': client_id})
    if client_data is None:
        return None
    return DbClient.validate_document(client_data)


def token_generator(*_):
    return generate_token(config.oauth2.token_length)


class AccessTokenGenerator(UserInfoMixin, JwtConfigMixin):
    jwt_token_expiration = config.oauth2.token_expiration.authorization_code

    def __call__(self, client: DbClient, grant_type: str, user: UserWithRoles, scope: str):
        jwt_config = self.get_jwt_config()
        jwt_config['aud'] = [client.get_client_id()]
        jwt_config['auth_time'] = int(time.time())

        user_info = {'sub': user.user.id, 'roles': user.roles}
        if 'groups' in scope_to_list(scope):
            user_info['groups'] = user.user.groups
        return generate_id_token({}, user_info, code=generate_token(config.oauth2.access_token_length), **jwt_config)


def token_expires_in(client, grant_type: str):
    return getattr(config.oauth2.token_expiration, grant_type)


class BearerToken(_BearerToken):
    def __call__(self, client, grant_type, user=None, scope=None, expires_in=None, include_refresh_token=True):
        if 'offline_access' not in scope_to_list(scope):
            include_refresh_token = False
        return super(BearerToken, self).__call__(client, grant_type, user, scope, expires_in, include_refresh_token)


authorization = AuthorizationServer(
    query_client,
    save_token,
    BearerToken(AccessTokenGenerator(), expires_generator=token_expires_in, refresh_token_generator=token_generator),
)


class OpenIDSessionState:
    def __call__(self, grant: BaseGrant):
        grant.register_hook('process_token', self.process_token)

    def process_token(self, grant: BaseGrant, token: dict):
        scope = token.get('scope')
        if not scope or not is_openid_scope(scope):
            # standard authorization code flow
            return token
        token['session_state'] = str(grant.request.user.last_modified)
        return token


# support all openid grants
authorization.register_grant(AuthorizationCodeGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(OpenIDImplicitGrant)
authorization.register_grant(OpenIDHybridGrant)
authorization.register_grant(RefreshTokenGrant, [OpenIDCode(), OpenIDSessionState()])
authorization.register_grant(ResourceOwnerPasswordCredentialsGrant)


class BearerTokenValidator(_BearerTokenValidator):
    def authenticate_token(self, token_string: str):
        token_data = token_collection.find_one({'_id': token_string})
        if token_data is None:
            return None
        token = DbToken.validate_document(token_data)
        if client_user_cache_collection.count_documents({
            'client_id': token.client_id,
            'user_id': token.user_id,
        }) != 1:
            return None
        return token

    def request_invalid(self, request: TypedRequest):
        return False

    def token_revoked(self, token: DbToken):
        return token.revoked


class ResourceProtector(_ResourceProtector):
    def validate(self, request: OAuth2Request, scope: str = None, scope_operator='AND') -> DbToken:
        assert isinstance(request, OAuth2Request)
        return self.validate_request(scope, request, scope_operator)


class UserIntrospection(UserInfoMixin):
    async def create_response(self, request: TypedRequest) -> Response:
        try:
            assert isinstance(request, OAuth2Request)
            request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
            if request.token is None:
                raise HTTPException(403, "Invalid token")
            request.user = await UserWithRoles.async_load(request.token.user_id, request.token.client_id)
            user_info = await self.async_generate_user_info(request.user, request.token.scope)
            return JSONResponse(user_info)
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class RequestOriginVerifier:
    async def create_response(self, request: TypedRequest, origin: str) -> Optional[Response]:
        try:
            assert isinstance(request, OAuth2Request)
            request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
            if request.token is None:
                raise HTTPException(403, "Invalid token")
            request.client = await async_query_client(request.token.client_id)
            if request.client is None:
                raise HTTPException(403, "Invalid client in token")
            if not request.client.check_redirect_uri(origin):
                raise HTTPException(403, "Allowed redirect uri does not match request")
            return None
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class OtherUserInspection(UserInfoMixin):
    async def create_response(self, request: TypedRequest, user_id: str, client_auth: dict = None) -> Response:
        try:
            assert isinstance(request, OAuth2Request)
            if request.client is None:
                request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
                if request.token is None:
                    raise HTTPException(403, "Invalid token")
                client_id = request.token.client_id
                scopes = request.token.scope
                scope = USERS_SCOPE
            else:
                client_id = request.client_id
                scopes = request.client.allowed_scope
                scope = scopes
            if USERS_SCOPE not in scope_to_list(scopes):
                raise InsufficientScopeError('Missing "*users" scope', request.uri)
            user = await UserWithRoles.async_load(user_id, client_id)
            if user is None:
                raise HTTPException(404, "User not found")
            user_info = await self.async_generate_user_info(user, scope)
            return JSONResponse(user_info)
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class OtherUsersInspection(UserInfoMixin):
    async def create_response(self, request: TypedRequest) -> Response:
        try:
            assert isinstance(request, OAuth2Request)
            if request.client is None:
                request.token = await run_in_threadpool(resource_protector.validate_request, None, request)
                if request.token is None:
                    raise HTTPException(403, "Invalid token")
                client_id = request.token.client_id
                scopes = request.token.scope
                scope = USERS_SCOPE
                load_roles = False
            else:
                client_id = request.client_id
                scopes = request.client.allowed_scope
                scope = scopes
                load_roles = True
            if USERS_SCOPE not in scope_to_list(scopes):
                raise InsufficientScopeError('Missing "*users" scope', request.uri)
            user_infos = []
            for user in await UserWithRoles.async_load_all(client_id, load_roles=load_roles):
                user_info = await self.async_generate_user_info(user, scope)
                if not load_roles:
                    del user_info['roles']
                user_infos.append(user_info)
            return JSONResponse(user_infos)
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


class TypeHint(str, Enum):
    AccessToken = "access_token"
    RefreshToken = "refresh_token"


class RevocationEndpoint:
    async def create_response(
            self, raw_token: str, token_type_hint: Optional[TypeHint], request: TypedRequest
    ) -> Response:
        token_data = None
        if token_type_hint is None or token_type_hint == TypeHint.AccessToken:
            token_data = await async_token_collection.find_one({'_id': raw_token})
        if token_data is None and (token_type_hint is None or token_type_hint == TypeHint.RefreshToken):
            token_data = await async_token_collection.find_one({'refresh_token': raw_token})
        if token_data is None:
            return Response()
        token = DbToken.validate_document(token_data)
        try:
            if request.client_id is None:
                request.client_id = token.client_id
            elif token.client_id != request.client_id:
                raise InvalidClientError(state=request.state, status_code=401)
            await run_in_threadpool(
                authorization.authenticate_client, request, ["none", "client_secret_basic", "client_secret_post"]
            )
            # await async_token_collection.update_one({'_id': token.access_token}, {'$set': {'revoked': True}})
            # token_collection.update_one({'_id': credential.access_token}, {'revoked': True})
            await async_token_collection.delete_one({'_id': token.access_token})
            return Response()
        except OAuth2Error as error:
            return authorization.handle_error_response(request, error)


resource_protector = ResourceProtector()
resource_protector.register_token_validator(BearerTokenValidator())
user_introspection = UserIntrospection()
token_revocation = RevocationEndpoint()
request_origin_verifier = RequestOriginVerifier()
other_user_inspection = OtherUserInspection()
other_users_inspection = OtherUsersInspection()
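The module above builds the authlib server objects (`authorization`, `resource_protector`, the introspection and revocation helpers) but contains no HTTP routes itself. The snippet below is only a minimal wiring sketch under the assumption that a FastAPI application is used; the app object, the `/token` route path and the helper `oauth2_request_from` are illustrative names, not part of the module above.

from fastapi import FastAPI, Request
from starlette.concurrency import run_in_threadpool
from authlib.oauth2 import OAuth2Request

app = FastAPI()  # hypothetical application object, not defined in the module above


async def oauth2_request_from(request: Request) -> OAuth2Request:
    # Wrap the incoming HTTP request in authlib's OAuth2Request; the custom
    # AuthorizationServer.create_oauth2_request above accepts it unchanged.
    form = dict(await request.form())
    return OAuth2Request(request.method, str(request.url), form, dict(request.headers))


@app.post('/token')
async def token_endpoint(request: Request):
    oauth2_request = await oauth2_request_from(request)
    # create_token_response is inherited from authlib's AuthorizationServer and
    # ultimately returns the starlette JSONResponse built by handle_response above.
    return await run_in_threadpool(authorization.create_token_response, oauth2_request)

Running the grant logic through run_in_threadpool mirrors how the module itself calls the synchronous authlib APIs from async code.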
import asyncio
import datetime
import logging
import socket
from . import protocol
from typing import Tuple
from asyncio import AbstractEventLoop

logger = logging.getLogger(__name__)


class Server(object):
    def __init__(
        self,
        host: str = "localhost",
        port: int = 30003,
        backlog=100,
        loop: AbstractEventLoop = None,
    ) -> None:
        self.loop = loop or asyncio.get_event_loop()
        self.host = host
        self._requested_port = port
        self.port = None
        self.backlog = backlog
        self.listener = None
        self.protocols = {}

    async def start(self) -> None:
        """ Start the server """
        try:
            self.listener = await self.loop.create_server(
                lambda: protocol.SBSServerProtocol(self),
                self.host,
                self._requested_port,
                family=socket.AF_INET,
                backlog=self.backlog,
            )  # type: asyncio.Server
            # Fetch actual port in use. This can be different from the
            # specified port if the port was passed as 0 which means use
            # an ephemeral port.
            assert len(self.listener.sockets) == 1
            _, self.port = self.listener.sockets[0].getsockname()
        except asyncio.CancelledError:
            logger.exception("Connection waiter Future was cancelled")
        except Exception:
            logger.exception("An error occurred in start")

    async def stop(self) -> None:
        """ Stop the server """
        if self.listener:
            # Avoid iterating over the protocols dict which may change size
            # while it is being iterating over.
            peers = list(self.protocols)
            for peer in peers:
                prot = self.protocols.get(peer)
                if prot:
                    prot.close()
            self.listener.close()

    def register_protocol(
        self, peer: Tuple[str, int], prot: "SBSServerProtocol"
    ) -> None:
        """ Register a protocol instance with the server.

        :param peer: Tuple of (host:str, port:int).
        :param prot: a SBSServerProtocol instance.
        """
        self.protocols[peer] = prot

    def deregister_protocol(self, peer: Tuple[str, int]) -> None:
        """ De-register a protocol instance from the server.

        This peer will no longer receive messages.

        :param peer: Tuple of (host:str, port:int).
        """
        del self.protocols[peer]

    def send_message(self, msg: bytes, peer: Tuple[str, int] = None) -> None:
        """ Send a message.

        :param msg: A bytes object representing the SBS format message to
          send to peers. The message is assumed to include the end of
          message delimiter.

        :param peer: A specific peer to send the message to. Peer is a
          Tuple of (host:str, port:int). If not specified then the message
          is broadcast to all peers.
        """
        if self.protocols:
            if peer:
                prot = self.protocols.get(peer)
                if prot:
                    prot.send_message(msg)
                else:
                    raise Exception(
                        f"Server can't send msg to non-existant peer: {peer}"
                    )
            else:
                # broadcast message to all peers
                for peer, prot in self.protocols.items():
                    prot.send_message(msg)
        else:
            raise Exception("Server can't send msg, no peers available")
:param peer: A", "import logging import socket from . import protocol from typing", "self.loop = loop or asyncio.get_event_loop() self.host = host self._requested_port =", "a protocol instance from the server. This peer will no", "None: \"\"\" Start the server \"\"\" try: self.listener = await", "type: asyncio.Server # Fetch actual port in use. This can", "from the server. This peer will no longer receive messages.", "message to. Peer is a Tuple of (host:str, port:int). If", "which means use # an ephemeral port. assert len(self.listener.sockets) ==", "= None) -> None: \"\"\" Send a message. :param msg:", "asyncio.get_event_loop() self.host = host self._requested_port = port self.port = None", "= host self._requested_port = port self.port = None self.backlog =", "msg: bytes, peer: Tuple[str, int] = None) -> None: \"\"\"", "AbstractEventLoop logger = logging.getLogger(__name__) class Server(object): def __init__( self, host:", ":param peer: Tuple of (host:str, port:int). :param prot: a SBSServerProtocol", "self.port = self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception(\"Connection waiter Future was cancelled\")", "The message is assumed to include the end of message", "peer: Tuple[str, int] = None) -> None: \"\"\" Send a", "peer: prot = self.protocols.get(peer) if prot: prot.send_message(msg) else: raise Exception(", "in self.protocols.items(): prot.send_message(msg) else: raise Exception(\"Server can't send msg, no", "over. peers = list(self.protocols) for peer in peers: prot =", "if the port was passed as 0 which means use", "raise Exception( f\"Server can't send msg to non-existant peer: {peer}\"", "longer receive messages. :param peer: Tuple of (host:str, port:int). \"\"\"", "if prot: prot.send_message(msg) else: raise Exception( f\"Server can't send msg", "A bytes object representing the SBS format message to send", "the message is broadcast to all peers. \"\"\" if self.protocols:", "if self.protocols: if peer: prot = self.protocols.get(peer) if prot: prot.send_message(msg)", "self.protocols[peer] = prot def deregister_protocol(self, peer: Tuple[str, int]) -> None:", "# while it is being iterating over. peers = list(self.protocols)", "self.listener.close() def register_protocol( self, peer: Tuple[str, int], prot: \"SBSServerProtocol\" )", "peer: Tuple[str, int]) -> None: \"\"\" De-register a protocol instance", "server \"\"\" try: self.listener = await self.loop.create_server( lambda: protocol.SBSServerProtocol(self), self.host,", "asyncio.CancelledError: logger.exception(\"Connection waiter Future was cancelled\") except Exception: logger.exception(\"An error", "Exception: logger.exception(\"An error occurred in start\") async def stop(self) ->", "protocol instance from the server. This peer will no longer", "iterating over the protocols dict which may change size #", "of (host:str, port:int). If not specified then the message is", "import Tuple from asyncio import AbstractEventLoop logger = logging.getLogger(__name__) class", "self.host = host self._requested_port = port self.port = None self.backlog", "== 1 _, self.port = self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception(\"Connection waiter", "-> None: \"\"\" Start the server \"\"\" try: self.listener =", "Fetch actual port in use. This can be different from", "instance with the server. :param peer: Tuple of (host:str, port:int).", "send the message to. 
Peer is a Tuple of (host:str,", "self.host, self._requested_port, family=socket.AF_INET, backlog=self.backlog, ) # type: asyncio.Server # Fetch", "loop or asyncio.get_event_loop() self.host = host self._requested_port = port self.port", "self.backlog = backlog self.listener = None self.protocols = {} async", "backlog=self.backlog, ) # type: asyncio.Server # Fetch actual port in", "self, host: str = \"localhost\", port: int = 30003, backlog=100,", "\"\"\" if self.protocols: if peer: prot = self.protocols.get(peer) if prot:", "peers. \"\"\" if self.protocols: if peer: prot = self.protocols.get(peer) if", "= prot def deregister_protocol(self, peer: Tuple[str, int]) -> None: \"\"\"", "int]) -> None: \"\"\" De-register a protocol instance from the", "None: \"\"\" Send a message. :param msg: A bytes object", "receive messages. :param peer: Tuple of (host:str, port:int). \"\"\" del", "= self.protocols.get(peer) if prot: prot.close() self.listener.close() def register_protocol( self, peer:", "self, peer: Tuple[str, int], prot: \"SBSServerProtocol\" ) -> None: \"\"\"", "the SBS format message to send to peers. The message", "from asyncio import AbstractEventLoop logger = logging.getLogger(__name__) class Server(object): def", "None: \"\"\" Stop the server \"\"\" if self.listener: # Avoid", "which may change size # while it is being iterating", ":param peer: Tuple of (host:str, port:int). \"\"\" del self.protocols[peer] def", "waiter Future was cancelled\") except Exception: logger.exception(\"An error occurred in", "of (host:str, port:int). \"\"\" del self.protocols[peer] def send_message(self, msg: bytes,", "len(self.listener.sockets) == 1 _, self.port = self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception(\"Connection", "with the server. :param peer: Tuple of (host:str, port:int). :param", "instance. \"\"\" self.protocols[peer] = prot def deregister_protocol(self, peer: Tuple[str, int])", "will no longer receive messages. :param peer: Tuple of (host:str,", ") # type: asyncio.Server # Fetch actual port in use.", "If not specified then the message is broadcast to all", "size # while it is being iterating over. peers =", "(host:str, port:int). :param prot: a SBSServerProtocol instance. \"\"\" self.protocols[peer] =", "30003, backlog=100, loop: AbstractEventLoop = None, ) -> None: self.loop", "import AbstractEventLoop logger = logging.getLogger(__name__) class Server(object): def __init__( self,", "__init__( self, host: str = \"localhost\", port: int = 30003,", "peer in peers: prot = self.protocols.get(peer) if prot: prot.close() self.listener.close()", "loop: AbstractEventLoop = None, ) -> None: self.loop = loop", "(host:str, port:int). If not specified then the message is broadcast", "self.protocols: if peer: prot = self.protocols.get(peer) if prot: prot.send_message(msg) else:", "the port was passed as 0 which means use #", "port if the port was passed as 0 which means", "int = 30003, backlog=100, loop: AbstractEventLoop = None, ) ->", "= backlog self.listener = None self.protocols = {} async def", "msg to non-existant peer: {peer}\" ) else: # broadcast message", "0 which means use # an ephemeral port. assert len(self.listener.sockets)", "self.protocols.items(): prot.send_message(msg) else: raise Exception(\"Server can't send msg, no peers", "Tuple of (host:str, port:int). :param prot: a SBSServerProtocol instance. \"\"\"", "to all peers. \"\"\" if self.protocols: if peer: prot =", "-> None: \"\"\" Send a message. 
:param msg: A bytes", "peer: {peer}\" ) else: # broadcast message to all peers", "self.listener: # Avoid iterating over the protocols dict which may", "# type: asyncio.Server # Fetch actual port in use. This", "peers for peer, prot in self.protocols.items(): prot.send_message(msg) else: raise Exception(\"Server", "None: \"\"\" De-register a protocol instance from the server. This", "self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception(\"Connection waiter Future was cancelled\") except Exception:", "= None self.protocols = {} async def start(self) -> None:", "= port self.port = None self.backlog = backlog self.listener =", "A specific peer to send the message to. Peer is", "def deregister_protocol(self, peer: Tuple[str, int]) -> None: \"\"\" De-register a", "or asyncio.get_event_loop() self.host = host self._requested_port = port self.port =", "bytes object representing the SBS format message to send to", "port. assert len(self.listener.sockets) == 1 _, self.port = self.listener.sockets[0].getsockname() except", "port: int = 30003, backlog=100, loop: AbstractEventLoop = None, )", "if prot: prot.close() self.listener.close() def register_protocol( self, peer: Tuple[str, int],", "msg: A bytes object representing the SBS format message to", "= logging.getLogger(__name__) class Server(object): def __init__( self, host: str =", "if peer: prot = self.protocols.get(peer) if prot: prot.send_message(msg) else: raise", "specified then the message is broadcast to all peers. \"\"\"", "= self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception(\"Connection waiter Future was cancelled\") except", "1 _, self.port = self.listener.sockets[0].getsockname() except asyncio.CancelledError: logger.exception(\"Connection waiter Future", "backlog self.listener = None self.protocols = {} async def start(self)", "use. This can be different from the # specified port", "register_protocol( self, peer: Tuple[str, int], prot: \"SBSServerProtocol\" ) -> None:", "peer: Tuple of (host:str, port:int). :param prot: a SBSServerProtocol instance.", "change size # while it is being iterating over. peers", "del self.protocols[peer] def send_message(self, msg: bytes, peer: Tuple[str, int] =", "messages. :param peer: Tuple of (host:str, port:int). \"\"\" del self.protocols[peer]", "\"\"\" try: self.listener = await self.loop.create_server( lambda: protocol.SBSServerProtocol(self), self.host, self._requested_port,", "-> None: \"\"\" De-register a protocol instance from the server.", "None: self.loop = loop or asyncio.get_event_loop() self.host = host self._requested_port", "def start(self) -> None: \"\"\" Start the server \"\"\" try:", "from the # specified port if the port was passed", "await self.loop.create_server( lambda: protocol.SBSServerProtocol(self), self.host, self._requested_port, family=socket.AF_INET, backlog=self.backlog, ) #", "dict which may change size # while it is being", "send msg to non-existant peer: {peer}\" ) else: # broadcast", "None self.protocols = {} async def start(self) -> None: \"\"\"", "was passed as 0 which means use # an ephemeral" ]
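
# Illustrative usage sketch (not part of the original module): start the Server
# defined above on an ephemeral port, attempt one broadcast, then shut down.
# The payload bytes below are a made-up placeholder SBS line; only methods
# shown above (start, send_message, stop) are used.
async def _demo_server() -> None:
    server = Server(host="localhost", port=0)  # port=0 requests an ephemeral port
    await server.start()
    print(f"SBS server listening on {server.host}:{server.port}")
    try:
        # Broadcast to all connected peers; raises if no peers are connected yet.
        server.send_message(b"MSG,3,1,1,ABC123,1,2020/01/01,00:00:00.000\r\n")
    except Exception as exc:
        print(f"broadcast skipped: {exc}")
    await server.stop()

# To run the sketch:
# asyncio.run(_demo_server())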
[ "named_sinks: Optional[List[str]] = None response: Dict[ str, Any ] =", "finding key\") def add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str", "import uuid from enum import Enum from typing import List,", "field(default_factory=lambda: {}) named_sinks: Optional[List[str]] = None response: Dict[ str, Any", "...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base import Finding, BaseBlock class EventType(Enum):", "existing_finding = self.findings.get(finding_key) if existing_finding: logging.warning( f\"Overriding existing finding. finding_key:", "example stop_processing: bool = False _scheduler: Optional[PlaybooksScheduler] = None def", "ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None # Right now: # 1.", "because of hikaru) @dataclass class ExecutionBaseEvent: findings: Dict[str, Finding] =", "the pydantic PR that addresses those issues is merged, this", "findings: Dict[str, Finding] = field(default_factory=lambda: {}) named_sinks: Optional[List[str]] = None", "import logging import uuid from enum import Enum from typing", "Dict[str, Finding] = field(default_factory=lambda: {}) named_sinks: Optional[List[str]] = None response:", "{finding_key} new finding: {finding}\" ) self.findings[finding_key] = finding @staticmethod def", "import dataclass, field from pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler import", "class # (note that we need to integrate with dataclasses", "finding_key: str = None): if ( not finding_key ): #", "new finding: {finding}\" ) self.findings[finding_key] = finding @staticmethod def from_params(params:", "this can't be a pydantic BaseModel because of various pydantic", "finding default fields according to the event type\"\"\" return Finding(title=\"Generic", "not finding_key ): # user didn't specify a key, so", "Optional, Dict, Any from dataclasses import dataclass, field from pydantic", "event type\"\"\" return Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding key\") def add_enrichment(", "Randomise it finding_key = str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if existing_finding:", ") self.findings[finding_key] = finding @staticmethod def from_params(params: ExecutionEventBaseParams) -> Optional[\"ExecutionBaseEvent\"]:", "= str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if existing_finding: logging.warning( f\"Overriding existing", "create_default_finding(self) -> Finding: \"\"\"Create finding default fields according to the", "1 PROMETHEUS = 2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER = 4", "import Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS =", "typing import List, Optional, Dict, Any from dataclasses import dataclass,", "Finding] = field(default_factory=lambda: {}) named_sinks: Optional[List[str]] = None response: Dict[", "): finding = self.findings.get(finding_key) if not finding: finding = self.create_default_finding()", "pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) #", "annotations) def add_finding(self, finding: Finding, finding_key: str = None): if", "= 2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel):", "aggregation_key=\"Generic finding key\") def add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key:", "\"DEFAULT\", ): finding = self.findings.get(finding_key) if not finding: finding =", 
"that we need to integrate with dataclasses because of hikaru)", "Optional[List[str]] = None # Right now: # 1. this is", "# once the pydantic PR that addresses those issues is", "None response: Dict[ str, Any ] = None # Response", "existing_finding: logging.warning( f\"Overriding existing finding. finding_key: {finding_key} new finding: {finding}\"", "import Enum from typing import List, Optional, Dict, Any from", "= self.findings.get(finding_key) if not finding: finding = self.create_default_finding() self.findings[finding_key] =", "accessed by key. Randomise it finding_key = str(uuid.uuid4()) existing_finding =", "import List, Optional, Dict, Any from dataclasses import dataclass, field", "_scheduler: Optional[PlaybooksScheduler] = None def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler =", "if not finding: finding = self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks,", "stop_processing: bool = False _scheduler: Optional[PlaybooksScheduler] = None def set_scheduler(self,", "finding: {finding}\" ) self.findings[finding_key] = finding @staticmethod def from_params(params: ExecutionEventBaseParams)", "# 1. this is a dataclass but we need to", "make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/ #", "4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None # Right now:", "pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic PR that", "once the pydantic PR that addresses those issues is merged,", "user didn't specify a key, so this finding shouldn't be", "should be a pydantic class # (note that we need", "need to make all fields optional in subclasses because of", "to integrate with dataclasses because of hikaru) @dataclass class ExecutionBaseEvent:", "a pydantic class # (note that we need to integrate", "class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None # Right now: #", "hikaru) @dataclass class ExecutionBaseEvent: findings: Dict[str, Finding] = field(default_factory=lambda: {})", "if ( not finding_key ): # user didn't specify a", "finding_key = str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if existing_finding: logging.warning( f\"Overriding", "from pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base", "manual triggers for example stop_processing: bool = False _scheduler: Optional[PlaybooksScheduler]", "scheduler: PlaybooksScheduler): self._scheduler = scheduler def get_scheduler(self) -> PlaybooksScheduler: return", "the event type\"\"\" return Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding key\") def", "Dict[ str, Any ] = None # Response returned to", "enum import Enum from typing import List, Optional, Dict, Any", "we need to make all fields optional in subclasses because", "SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None #", "addresses those issues is merged, this should be a pydantic", "logging.warning( f\"Overriding existing finding. finding_key: {finding_key} new finding: {finding}\" )", "dataclasses import dataclass, field from pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler", "( not finding_key ): # user didn't specify a key,", "None # Right now: # 1. 
this is a dataclass", "bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic PR that addresses", "str = \"DEFAULT\", ): finding = self.findings.get(finding_key) if not finding:", "dataclasses because of hikaru) @dataclass class ExecutionBaseEvent: findings: Dict[str, Finding]", "finding = self.findings.get(finding_key) if not finding: finding = self.create_default_finding() self.findings[finding_key]", "\"\"\"Create finding default fields according to the event type\"\"\" return", "according to the event type\"\"\" return Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding", "this finding shouldn't be accessed by key. Randomise it finding_key", "Finding: \"\"\"Create finding default fields according to the event type\"\"\"", "Finding\", aggregation_key=\"Generic finding key\") def add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None,", "field from pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from", "= None def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler = scheduler def", "2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel): named_sinks:", "-> PlaybooksScheduler: return self._scheduler def create_default_finding(self) -> Finding: \"\"\"Create finding", "get_scheduler(self) -> PlaybooksScheduler: return self._scheduler def create_default_finding(self) -> Finding: \"\"\"Create", "uuid from enum import Enum from typing import List, Optional,", "self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding: Finding,", "finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding: Finding, finding_key: str =", "Dict, Any from dataclasses import dataclass, field from pydantic import", "= None # Right now: # 1. this is a", "= None response: Dict[ str, Any ] = None #", "def create_default_finding(self) -> Finding: \"\"\"Create finding default fields according to", "= finding @staticmethod def from_params(params: ExecutionEventBaseParams) -> Optional[\"ExecutionBaseEvent\"]: return ExecutionBaseEvent(named_sinks=params.named_sinks)", "from enum import Enum from typing import List, Optional, Dict,", "that addresses those issues is merged, this should be a", "def get_scheduler(self) -> PlaybooksScheduler: return self._scheduler def create_default_finding(self) -> Finding:", "various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic PR", "so this finding shouldn't be accessed by key. Randomise it", "https://stackoverflow.com/questions/51575931/ # 2. this can't be a pydantic BaseModel because", "to caller. 
For admission or manual triggers for example stop_processing:", "merged, this should be a pydantic class # (note that", "finding: finding = self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations) def", "class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS = 2 MANUAL_TRIGGER =", "from typing import List, Optional, Dict, Any from dataclasses import", "self._scheduler = scheduler def get_scheduler(self) -> PlaybooksScheduler: return self._scheduler def", "triggers for example stop_processing: bool = False _scheduler: Optional[PlaybooksScheduler] =", "of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic", "those issues is merged, this should be a pydantic class", "EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS = 2 MANUAL_TRIGGER = 3", "because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once the", "# 2. this can't be a pydantic BaseModel because of", "KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS = 2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER", "BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS = 2 MANUAL_TRIGGER", "named_sinks: Optional[List[str]] = None # Right now: # 1. this", "pydantic class # (note that we need to integrate with", "= self.findings.get(finding_key) if existing_finding: logging.warning( f\"Overriding existing finding. finding_key: {finding_key}", "key, so this finding shouldn't be accessed by key. Randomise", "is merged, this should be a pydantic class # (note", "<gh_stars>0 import logging import uuid from enum import Enum from", "with dataclasses because of hikaru) @dataclass class ExecutionBaseEvent: findings: Dict[str,", "None def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler = scheduler def get_scheduler(self)", "of hikaru) @dataclass class ExecutionBaseEvent: findings: Dict[str, Finding] = field(default_factory=lambda:", "self.findings[finding_key] = finding @staticmethod def from_params(params: ExecutionEventBaseParams) -> Optional[\"ExecutionBaseEvent\"]: return", "because of https://stackoverflow.com/questions/51575931/ # 2. this can't be a pydantic", "integrate with dataclasses because of hikaru) @dataclass class ExecutionBaseEvent: findings:", "str, Any ] = None # Response returned to caller.", "= 4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None # Right", "not finding: finding = self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations)", "finding shouldn't be accessed by key. Randomise it finding_key =", "f\"Overriding existing finding. finding_key: {finding_key} new finding: {finding}\" ) self.findings[finding_key]", "PlaybooksScheduler: return self._scheduler def create_default_finding(self) -> Finding: \"\"\"Create finding default", "Response returned to caller. For admission or manual triggers for", "finding_key: {finding_key} new finding: {finding}\" ) self.findings[finding_key] = finding @staticmethod", "dataclass but we need to make all fields optional in", "key. 
Randomise it finding_key = str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if", "PlaybooksScheduler from ..reporting.base import Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE =", "(see https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic PR that addresses those", "Optional[List[str]] = None response: Dict[ str, Any ] = None", "but we need to make all fields optional in subclasses", "] = None # Response returned to caller. For admission", "returned to caller. For admission or manual triggers for example", "fields optional in subclasses because of https://stackoverflow.com/questions/51575931/ # 2. this", "List[BaseBlock], annotations=None, finding_key: str = \"DEFAULT\", ): finding = self.findings.get(finding_key)", "def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler = scheduler def get_scheduler(self) ->", "= self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding:", "= None): if ( not finding_key ): # user didn't", "= None # Response returned to caller. For admission or", "MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]]", "= 3 SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] =", "https://github.com/samuelcolvin/pydantic/pull/2557) # once the pydantic PR that addresses those issues", "default fields according to the event type\"\"\" return Finding(title=\"Generic Finding\",", "finding. finding_key: {finding_key} new finding: {finding}\" ) self.findings[finding_key] = finding", "PROMETHEUS = 2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER = 4 class", "a pydantic BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557)", "def add_finding(self, finding: Finding, finding_key: str = None): if (", "Optional[PlaybooksScheduler] = None def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler = scheduler", "of https://stackoverflow.com/questions/51575931/ # 2. this can't be a pydantic BaseModel", "or manual triggers for example stop_processing: bool = False _scheduler:", "set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler = scheduler def get_scheduler(self) -> PlaybooksScheduler:", "optional in subclasses because of https://stackoverflow.com/questions/51575931/ # 2. this can't", "finding: Finding, finding_key: str = None): if ( not finding_key", "scheduler def get_scheduler(self) -> PlaybooksScheduler: return self._scheduler def create_default_finding(self) ->", "1. this is a dataclass but we need to make", "be a pydantic class # (note that we need to", "= field(default_factory=lambda: {}) named_sinks: Optional[List[str]] = None response: Dict[ str,", "response: Dict[ str, Any ] = None # Response returned", "Finding, finding_key: str = None): if ( not finding_key ):", "BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base import Finding, BaseBlock", "2. 
this can't be a pydantic BaseModel because of various", "is a dataclass but we need to make all fields", "None): if ( not finding_key ): # user didn't specify", "finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding: Finding, finding_key: str = None):", "didn't specify a key, so this finding shouldn't be accessed", "For admission or manual triggers for example stop_processing: bool =", "Any from dataclasses import dataclass, field from pydantic import BaseModel", "to the event type\"\"\" return Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding key\")", "specify a key, so this finding shouldn't be accessed by", "= False _scheduler: Optional[PlaybooksScheduler] = None def set_scheduler(self, scheduler: PlaybooksScheduler):", "bool = False _scheduler: Optional[PlaybooksScheduler] = None def set_scheduler(self, scheduler:", "existing finding. finding_key: {finding_key} new finding: {finding}\" ) self.findings[finding_key] =", "self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str = \"DEFAULT\", ): finding", "a dataclass but we need to make all fields optional", "-> Finding: \"\"\"Create finding default fields according to the event", "all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/ # 2.", "Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding key\") def add_enrichment( self, enrichment_blocks: List[BaseBlock],", "shouldn't be accessed by key. Randomise it finding_key = str(uuid.uuid4())", "return self._scheduler def create_default_finding(self) -> Finding: \"\"\"Create finding default fields", "dataclass, field from pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler", "can't be a pydantic BaseModel because of various pydantic bugs", "finding_key: str = \"DEFAULT\", ): finding = self.findings.get(finding_key) if not", "= 1 PROMETHEUS = 2 MANUAL_TRIGGER = 3 SCHEDULED_TRIGGER =", "this is a dataclass but we need to make all", "= scheduler def get_scheduler(self) -> PlaybooksScheduler: return self._scheduler def create_default_finding(self)", "None # Response returned to caller. For admission or manual", "key\") def add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str =", "ExecutionBaseEvent: findings: Dict[str, Finding] = field(default_factory=lambda: {}) named_sinks: Optional[List[str]] =", "..reporting.base import Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS", "add_finding(self, finding: Finding, finding_key: str = None): if ( not", "we need to integrate with dataclasses because of hikaru) @dataclass", "return Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding key\") def add_enrichment( self, enrichment_blocks:", "pydantic import BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base import", "Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1 PROMETHEUS = 2", "Right now: # 1. this is a dataclass but we", "def add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str = \"DEFAULT\",", "# user didn't specify a key, so this finding shouldn't", "be accessed by key. Randomise it finding_key = str(uuid.uuid4()) existing_finding", "{finding}\" ) self.findings[finding_key] = finding @staticmethod def from_params(params: ExecutionEventBaseParams) ->", "# Right now: # 1. 
this is a dataclass but", "to make all fields optional in subclasses because of https://stackoverflow.com/questions/51575931/", "by key. Randomise it finding_key = str(uuid.uuid4()) existing_finding = self.findings.get(finding_key)", "issues is merged, this should be a pydantic class #", "class ExecutionBaseEvent: findings: Dict[str, Finding] = field(default_factory=lambda: {}) named_sinks: Optional[List[str]]", "# Response returned to caller. For admission or manual triggers", "annotations=None, finding_key: str = \"DEFAULT\", ): finding = self.findings.get(finding_key) if", "finding = self.create_default_finding() self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self,", "caller. For admission or manual triggers for example stop_processing: bool", "Any ] = None # Response returned to caller. For", "be a pydantic BaseModel because of various pydantic bugs (see", "need to integrate with dataclasses because of hikaru) @dataclass class", "from dataclasses import dataclass, field from pydantic import BaseModel from", "type\"\"\" return Finding(title=\"Generic Finding\", aggregation_key=\"Generic finding key\") def add_enrichment( self,", "(note that we need to integrate with dataclasses because of", "enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str = \"DEFAULT\", ): finding =", "3 SCHEDULED_TRIGGER = 4 class ExecutionEventBaseParams(BaseModel): named_sinks: Optional[List[str]] = None", "a key, so this finding shouldn't be accessed by key.", "import BaseModel from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base import Finding,", "pydantic PR that addresses those issues is merged, this should", "self._scheduler def create_default_finding(self) -> Finding: \"\"\"Create finding default fields according", "fields according to the event type\"\"\" return Finding(title=\"Generic Finding\", aggregation_key=\"Generic", "{}) named_sinks: Optional[List[str]] = None response: Dict[ str, Any ]", "self.findings.get(finding_key) if existing_finding: logging.warning( f\"Overriding existing finding. finding_key: {finding_key} new", "finding_key ): # user didn't specify a key, so this", "= finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding: Finding, finding_key: str", "# (note that we need to integrate with dataclasses because", "False _scheduler: Optional[PlaybooksScheduler] = None def set_scheduler(self, scheduler: PlaybooksScheduler): self._scheduler", "if existing_finding: logging.warning( f\"Overriding existing finding. finding_key: {finding_key} new finding:", "this should be a pydantic class # (note that we", "PlaybooksScheduler): self._scheduler = scheduler def get_scheduler(self) -> PlaybooksScheduler: return self._scheduler", "= \"DEFAULT\", ): finding = self.findings.get(finding_key) if not finding: finding", "add_enrichment( self, enrichment_blocks: List[BaseBlock], annotations=None, finding_key: str = \"DEFAULT\", ):", "List, Optional, Dict, Any from dataclasses import dataclass, field from", "in subclasses because of https://stackoverflow.com/questions/51575931/ # 2. this can't be", "@dataclass class ExecutionBaseEvent: findings: Dict[str, Finding] = field(default_factory=lambda: {}) named_sinks:", "now: # 1. 
this is a dataclass but we need", "import PlaybooksScheduler from ..reporting.base import Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE", "for example stop_processing: bool = False _scheduler: Optional[PlaybooksScheduler] = None", "it finding_key = str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if existing_finding: logging.warning(", "self.findings[finding_key] = finding finding.add_enrichment(enrichment_blocks, annotations) def add_finding(self, finding: Finding, finding_key:", "self.findings.get(finding_key) if not finding: finding = self.create_default_finding() self.findings[finding_key] = finding", "str = None): if ( not finding_key ): # user", "subclasses because of https://stackoverflow.com/questions/51575931/ # 2. this can't be a", "PR that addresses those issues is merged, this should be", "from ...integrations.scheduled.playbook_scheduler import PlaybooksScheduler from ..reporting.base import Finding, BaseBlock class", "from ..reporting.base import Finding, BaseBlock class EventType(Enum): KUBERNETES_TOPOLOGY_CHANGE = 1", "logging import uuid from enum import Enum from typing import", "): # user didn't specify a key, so this finding", "Enum from typing import List, Optional, Dict, Any from dataclasses", "admission or manual triggers for example stop_processing: bool = False", "BaseModel because of various pydantic bugs (see https://github.com/samuelcolvin/pydantic/pull/2557) # once", "str(uuid.uuid4()) existing_finding = self.findings.get(finding_key) if existing_finding: logging.warning( f\"Overriding existing finding." ]
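
# Illustrative usage sketch (not part of the original module): build an event
# from sink parameters and attach a Finding under an explicit key. The sink
# name "slack" and the finding_key "demo" are placeholders; the Finding
# constructor arguments mirror the ones already used by create_default_finding().
def _demo_execution_event() -> ExecutionBaseEvent:
    params = ExecutionEventBaseParams(named_sinks=["slack"])
    event = ExecutionBaseEvent.from_params(params)
    event.add_finding(
        Finding(title="Generic Finding", aggregation_key="Generic finding key"),
        finding_key="demo",
    )
    # Calling add_finding again with the same key would log a warning and
    # override the stored finding.
    return event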
[ "import ( FloatField, StringField, ListField, URLField, ObjectIdField, ) class Shop(Document):", "\"bike\"} ID = ObjectIdField() name = StringField() brand = StringField()", "StringField() brand = StringField() year = StringField() size = ListField(StringField())", "URLField, ObjectIdField, ) class Shop(Document): meta = {\"collection\": \"shop\"} ID", "= StringField() year = StringField() size = ListField(StringField()) wheel_size =", "from mongoengine.fields import ( FloatField, StringField, ListField, URLField, ObjectIdField, )", "Shop(Document): meta = {\"collection\": \"shop\"} ID = ObjectIdField() name =", "<filename>examples/django_mongoengine/bike/models.py from mongoengine import Document from mongoengine.fields import ( FloatField,", ") class Shop(Document): meta = {\"collection\": \"shop\"} ID = ObjectIdField()", "Bike(Document): meta = {\"collection\": \"bike\"} ID = ObjectIdField() name =", "= {\"collection\": \"bike\"} ID = ObjectIdField() name = StringField() brand", "address = StringField() website = URLField() class Bike(Document): meta =", "StringField() size = ListField(StringField()) wheel_size = FloatField() type = StringField()", "= StringField() address = StringField() website = URLField() class Bike(Document):", "import Document from mongoengine.fields import ( FloatField, StringField, ListField, URLField,", "URLField() class Bike(Document): meta = {\"collection\": \"bike\"} ID = ObjectIdField()", "StringField, ListField, URLField, ObjectIdField, ) class Shop(Document): meta = {\"collection\":", "ObjectIdField, ) class Shop(Document): meta = {\"collection\": \"shop\"} ID =", "= ObjectIdField() name = StringField() address = StringField() website =", "{\"collection\": \"bike\"} ID = ObjectIdField() name = StringField() brand =", "= StringField() size = ListField(StringField()) wheel_size = FloatField() type =", "ID = ObjectIdField() name = StringField() brand = StringField() year", "meta = {\"collection\": \"shop\"} ID = ObjectIdField() name = StringField()", "ID = ObjectIdField() name = StringField() address = StringField() website", "{\"collection\": \"shop\"} ID = ObjectIdField() name = StringField() address =", "\"shop\"} ID = ObjectIdField() name = StringField() address = StringField()", "class Bike(Document): meta = {\"collection\": \"bike\"} ID = ObjectIdField() name", "Document from mongoengine.fields import ( FloatField, StringField, ListField, URLField, ObjectIdField,", "name = StringField() brand = StringField() year = StringField() size", "mongoengine.fields import ( FloatField, StringField, ListField, URLField, ObjectIdField, ) class", "class Shop(Document): meta = {\"collection\": \"shop\"} ID = ObjectIdField() name", "website = URLField() class Bike(Document): meta = {\"collection\": \"bike\"} ID", "StringField() year = StringField() size = ListField(StringField()) wheel_size = FloatField()", "brand = StringField() year = StringField() size = ListField(StringField()) wheel_size", "year = StringField() size = ListField(StringField()) wheel_size = FloatField() type", "meta = {\"collection\": \"bike\"} ID = ObjectIdField() name = StringField()", "mongoengine import Document from mongoengine.fields import ( FloatField, StringField, ListField,", "StringField() address = StringField() website = URLField() class Bike(Document): meta", "( FloatField, StringField, ListField, URLField, ObjectIdField, ) class Shop(Document): meta", "= {\"collection\": \"shop\"} ID = ObjectIdField() name = StringField() address", "= URLField() class Bike(Document): meta = 
{\"collection\": \"bike\"} ID =", "= ObjectIdField() name = StringField() brand = StringField() year =", "ListField, URLField, ObjectIdField, ) class Shop(Document): meta = {\"collection\": \"shop\"}", "ObjectIdField() name = StringField() address = StringField() website = URLField()", "name = StringField() address = StringField() website = URLField() class", "= StringField() brand = StringField() year = StringField() size =", "from mongoengine import Document from mongoengine.fields import ( FloatField, StringField,", "ObjectIdField() name = StringField() brand = StringField() year = StringField()", "FloatField, StringField, ListField, URLField, ObjectIdField, ) class Shop(Document): meta =", "= StringField() website = URLField() class Bike(Document): meta = {\"collection\":", "StringField() website = URLField() class Bike(Document): meta = {\"collection\": \"bike\"}" ]
[ "from ._movement import Movement from .path import MovementPath from .paths", "import Movement from .path import MovementPath from .paths import MovementPaths", "._movement import Movement from .path import MovementPath from .paths import" ]
[ "\"\"\" Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern", "on the server and adds it to the container. Returns", "list of resources can be retrieved from the server using", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "with all currently retrieved fixedClassifier resources using find and the", "an iterator or index Raises ------ - NotFoundError: The requested", "added fixedClassifier resources available through an iterator or index Raises", "The FixedClassifier class encapsulates a list of fixedClassifier resources that", "------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class Raises", "() _SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP = { } _SDM_ENUM_MAP =", "resources using find and the newly added fixedClassifier resources available", "resources from the server. To retrieve an exact match ensure", "a new fixedClassifier resource on the server and adds it", "# Copyright 1997 - 2020 by IXIA Keysight # #", "import Base from uhd_restpy.files import Files from typing import List,", "\"\"\" __slots__ = () _SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP = {", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "user. A list of resources can be retrieved from the", "packet must match all packets in order to be impaired", "\"\"\"Adds a new fixedClassifier resource on the server and adds", "permission notice shall be included in # all copies or", "deal in the Software without restriction, including without limitation #", "find(self): \"\"\"Finds and retrieves fixedClassifier resources from the server. All", "are evaluated on the server using regex. The named parameters", "profile to. If there are multiple patterns enabled, they are", "By default the find method takes no parameters and will", "portions of the Software. # # THE SOFTWARE IS PROVIDED", "the server using the FixedClassifier.find() method. The list can be", "ends with $ By default the find method takes no", "named parameters can be used to selectively retrieve fixedClassifier resources", "# # The above copyright notice and this permission notice", "parent, list_op=False): super(FixedClassifier, self).__init__(parent, list_op) @property def Pattern(self): \"\"\" Returns", "the newly added fixedClassifier resources available through an iterator or", "the FixedClassifier.add() and FixedClassifier.remove() methods. \"\"\" __slots__ = () _SDM_NAME", "else: return Pattern(self) def add(self): \"\"\"Adds a new fixedClassifier resource", "copies of the Software, and to permit persons to whom", "the server. To retrieve an exact match ensure the parameter", "hereby granted, free of charge, to any person obtaining a", "this profile. The FixedClassifier class encapsulates a list of fixedClassifier", "method takes no parameters and will retrieve all fixedClassifier resources", "to deal in the Software without restriction, including without limitation", "DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import Base from", "_SDM_ATT_MAP = { } _SDM_ENUM_MAP = { } def __init__(self,", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "Pattern(self): \"\"\" Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the", "to selectively retrieve fixedClassifier resources from the server. 
To retrieve", "# all copies or substantial portions of the Software. #", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "ANDed: each packet must match all packets in order to", "\"Software\"), # to deal in the Software without restriction, including", "The named parameters can be used to selectively retrieve fixedClassifier", "included in # all copies or substantial portions of the", "a single instance of fixedClassifier data from the server. Args", "impaired by this profile. The FixedClassifier class encapsulates a list", "self: This instance with matching fixedClassifier resources retrieved from the", "profile. The FixedClassifier class encapsulates a list of fixedClassifier resources", "- ServerError: The server has encountered an uncategorized error condition", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "can be used to selectively retrieve fixedClassifier resources from the", "including without limitation # the rights to use, copy, modify,", "fixedClassifier resources available through an iterator or index Raises ------", "using find and the newly added fixedClassifier resources available through", "patterns enabled, they are ANDed: each packet must match all", "Pattern(self) def add(self): \"\"\"Adds a new fixedClassifier resource on the", "iterator or index Raises ------ - NotFoundError: The requested resource", "notice and this permission notice shall be included in #", "server using the FixedClassifier.find() method. The list can be managed", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "fixedClassifier resources retrieved from the server available through an iterator", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "from the server. Args ---- - href (str): An href", "class Raises ------ - ServerError: The server has encountered an", "Software, and to permit persons to whom the # Software", "using the FixedClassifier.find() method. The list can be managed by", "all copies or substantial portions of the Software. # #", "uncategorized error condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href):", "USE OR OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base", "evaluated on the server using regex. The named parameters can", "\"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if self._properties.get('Pattern', None) is not", "a list of fixedClassifier resources that are managed by the", "Software is furnished to do so, subject to the following", "All named parameters are evaluated on the server using regex.", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "the find method takes no parameters and will retrieve all", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "will retrieve all fixedClassifier resources from the server. Returns -------", "sell copies of the Software, and to permit persons to", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "managed by the user. A list of resources can be", "are multiple patterns enabled, they are ANDed: each packet must", "parameters can be used to selectively retrieve fixedClassifier resources from", "typing import List, Any, Union class FixedClassifier(Base): \"\"\"Specifies the packets", "packets to apply this profile to. If there are multiple", "the server. 
Args ---- - href (str): An href to", "fixedClassifier resources from the server. To retrieve an exact match", "list_op) @property def Pattern(self): \"\"\" Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An", "starts with ^ and ends with $ By default the", "retrieve fixedClassifier resources from the server. To retrieve an exact", "encountered an uncategorized error condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern", "an uncategorized error condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self):", "# of this software and associated documentation files (the \"Software\"),", "furnished to do so, subject to the following conditions: #", "to do so, subject to the following conditions: # #", "# The above copyright notice and this permission notice shall", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "self._properties.get('Pattern', None) is not None: return self._properties.get('Pattern') else: return Pattern(self)", "a copy # of this software and associated documentation files", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "has encountered an uncategorized error condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))", "exact match ensure the parameter value starts with ^ and", "notice shall be included in # all copies or substantial", "{ } def __init__(self, parent, list_op=False): super(FixedClassifier, self).__init__(parent, list_op) @property", "add(self): \"\"\"Adds a new fixedClassifier resource on the server and", "to permit persons to whom the # Software is furnished", "instance with matching fixedClassifier resources retrieved from the server available", "are ANDed: each packet must match all packets in order", "instance with all currently retrieved fixedClassifier resources using find and", "by IXIA Keysight # # Permission is hereby granted, free", "with ^ and ends with $ By default the find", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "to be retrieved Returns ------- - self: This instance with", "and this permission notice shall be included in # all", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "there are multiple patterns enabled, they are ANDed: each packet", "_SDM_ENUM_MAP = { } def __init__(self, parent, list_op=False): super(FixedClassifier, self).__init__(parent,", "2020 by IXIA Keysight # # Permission is hereby granted,", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "retrieve an exact match ensure the parameter value starts with", "default the find method takes no parameters and will retrieve", "self: This instance with the fixedClassifier resources from the server", "obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class Raises ------ -", "and to permit persons to whom the # Software is", "instance to be retrieved Returns ------- - self: This instance", "following conditions: # # The above copyright notice and this", "new fixedClassifier resource on the server and adds it to", "This instance with all currently retrieved fixedClassifier resources using find", "Returns ------- - self: This instance with the fixedClassifier resources", "conditions: # # The above copyright notice and this permission", "instance of fixedClassifier data from the server. 
Args ---- -", "condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self): \"\"\"Deletes all the", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "and associated documentation files (the \"Software\"), # to deal in", "Union class FixedClassifier(Base): \"\"\"Specifies the packets to apply this profile", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "condition \"\"\" self._delete() def find(self): \"\"\"Finds and retrieves fixedClassifier resources", "(the \"Software\"), # to deal in the Software without restriction,", "selectively retrieve fixedClassifier resources from the server. To retrieve an", "super(FixedClassifier, self).__init__(parent, list_op) @property def Pattern(self): \"\"\" Returns ------- -", "index Raises ------ - NotFoundError: The requested resource does not", "(str): An href to the instance to be retrieved Returns", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "exist on the server - ServerError: The server has encountered", "the Software, and to permit persons to whom the #", "uncategorized error condition \"\"\" self._delete() def find(self): \"\"\"Finds and retrieves", "available through an iterator or index Raises ------ - ServerError:", "rights to use, copy, modify, merge, publish, distribute, sublicense, #", "To retrieve an exact match ensure the parameter value starts", "with matching fixedClassifier resources retrieved from the server available through", "- self: This instance with matching fixedClassifier resources retrieved from", "be included in # all copies or substantial portions of", "return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): \"\"\"Retrieves a single instance", "is hereby granted, free of charge, to any person obtaining", "associated documentation files (the \"Software\"), # to deal in the", "= { } _SDM_ENUM_MAP = { } def __init__(self, parent,", "def read(self, href): \"\"\"Retrieves a single instance of fixedClassifier data", "\"\"\"Retrieves a single instance of fixedClassifier data from the server.", "they are ANDed: each packet must match all packets in", "retrieved from the server available through an iterator or index", "the packets to apply this profile to. If there are", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "------ - NotFoundError: The requested resource does not exist on", "list can be managed by using the FixedClassifier.add() and FixedClassifier.remove()", "fixedClassifier resources using find and the newly added fixedClassifier resources", "person obtaining a copy # of this software and associated", "# # Permission is hereby granted, free of charge, to", "OR OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import", "server has encountered an uncategorized error condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP,", "and retrieves fixedClassifier resources from the server. All named parameters", "the # Software is furnished to do so, subject to", "fixedClassifier resources that are managed by the user. A list", "subject to the following conditions: # # The above copyright", "and/or sell copies of the Software, and to permit persons", "href to the instance to be retrieved Returns ------- -", "server. Returns ------- - self: This instance with matching fixedClassifier", "Keysight # # Permission is hereby granted, free of charge,", "with $ By default the find method takes no parameters", "the FixedClassifier.find() method. 
The list can be managed by using", "THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from", "\"\"\" self._delete() def find(self): \"\"\"Finds and retrieves fixedClassifier resources from", "or index Raises ------ - ServerError: The server has encountered", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "from uhd_restpy.files import Files from typing import List, Any, Union", "self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self): \"\"\"Deletes all the contained fixedClassifier resources", "must match all packets in order to be impaired by", "an uncategorized error condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if", "Any, Union class FixedClassifier(Base): \"\"\"Specifies the packets to apply this", "\"\"\"Deletes all the contained fixedClassifier resources in this instance from", "documentation files (the \"Software\"), # to deal in the Software", "LICENSE # # Copyright 1997 - 2020 by IXIA Keysight", "or substantial portions of the Software. # # THE SOFTWARE", "to be impaired by this profile. The FixedClassifier class encapsulates", "modify, merge, publish, distribute, sublicense, # and/or sell copies of", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "self).__init__(parent, list_op) @property def Pattern(self): \"\"\" Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern):", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "read(self, href): \"\"\"Retrieves a single instance of fixedClassifier data from", "Raises ------ - ServerError: The server has encountered an uncategorized", "encountered an uncategorized error condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "multiple patterns enabled, they are ANDed: each packet must match", "server using regex. The named parameters can be used to", "} def __init__(self, parent, list_op=False): super(FixedClassifier, self).__init__(parent, list_op) @property def", "instance with the fixedClassifier resources from the server available through", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "index Raises ------ - ServerError: The server has encountered an", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "resources from the server available through an iterator or index", "# # Copyright 1997 - 2020 by IXIA Keysight #", "server has encountered an uncategorized error condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "$ By default the find method takes no parameters and", "- obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class Raises ------", "the server and adds it to the container. Returns -------", "class FixedClassifier(Base): \"\"\"Specifies the packets to apply this profile to.", "using the FixedClassifier.add() and FixedClassifier.remove() methods. \"\"\" __slots__ = ()", "return Pattern(self) def add(self): \"\"\"Adds a new fixedClassifier resource on", "be retrieved from the server using the FixedClassifier.find() method. 
The", "The server has encountered an uncategorized error condition \"\"\" from", "# Permission is hereby granted, free of charge, to any", "of charge, to any person obtaining a copy # of", "the server. All named parameters are evaluated on the server", "- NotFoundError: The requested resource does not exist on the", "^ and ends with $ By default the find method", "- href (str): An href to the instance to be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "from typing import List, Any, Union class FixedClassifier(Base): \"\"\"Specifies the", "from the server. Returns ------- - self: This instance with", "OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import Base", "has encountered an uncategorized error condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import", "__slots__ = () _SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP = { }", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "\"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self): \"\"\"Deletes all the contained", "of the Pattern class Raises ------ - ServerError: The server", "restriction, including without limitation # the rights to use, copy,", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "- self: This instance with all currently retrieved fixedClassifier resources", "Pattern if self._properties.get('Pattern', None) is not None: return self._properties.get('Pattern') else:", "the server - ServerError: The server has encountered an uncategorized", "Base from uhd_restpy.files import Files from typing import List, Any,", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "# Software is furnished to do so, subject to the", "to the container. Returns ------- - self: This instance with", "matching fixedClassifier resources retrieved from the server available through an", "software and associated documentation files (the \"Software\"), # to deal", "SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import Files from", "This instance with matching fixedClassifier resources retrieved from the server", "currently retrieved fixedClassifier resources using find and the newly added", "server. Args ---- - href (str): An href to the", "import Files from typing import List, Any, Union class FixedClassifier(Base):", "to whom the # Software is furnished to do so,", "The server has encountered an uncategorized error condition \"\"\" self._delete()", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): \"\"\"Retrieves a single instance of", "by the user. 
A list of resources can be retrieved", "newly added fixedClassifier resources available through an iterator or index", "the parameter value starts with ^ and ends with $", "so, subject to the following conditions: # # The above", "condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): \"\"\"Retrieves a", "the instance to be retrieved Returns ------- - self: This", "takes no parameters and will retrieve all fixedClassifier resources from", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "the contained fixedClassifier resources in this instance from the server.", "Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of the Pattern class", "import Pattern if self._properties.get('Pattern', None) is not None: return self._properties.get('Pattern')", "the fixedClassifier resources from the server available through an iterator", "an iterator or index Raises ------ - ServerError: The server", "of resources can be retrieved from the server using the", "the following conditions: # # The above copyright notice and", "by this profile. The FixedClassifier class encapsulates a list of", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "from the server. All named parameters are evaluated on the", "condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if self._properties.get('Pattern', None) is", "this software and associated documentation files (the \"Software\"), # to", "self: This instance with all currently retrieved fixedClassifier resources using", "be used to selectively retrieve fixedClassifier resources from the server.", "all the contained fixedClassifier resources in this instance from the", "# THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import", "methods. \"\"\" __slots__ = () _SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP =", "list of fixedClassifier resources that are managed by the user.", "Raises ------ - NotFoundError: The requested resource does not exist", "packets in order to be impaired by this profile. The", "an exact match ensure the parameter value starts with ^", "the container. Returns ------- - self: This instance with all", "fixedClassifier resource on the server and adds it to the", "retrieved from the server using the FixedClassifier.find() method. The list", "server available through an iterator or index Raises ------ -", "fixedClassifier data from the server. Args ---- - href (str):", "in # all copies or substantial portions of the Software.", "locals())) def read(self, href): \"\"\"Retrieves a single instance of fixedClassifier", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "on the server using regex. 
The named parameters can be", "Returns ------- - self: This instance with all currently retrieved", "resource on the server and adds it to the container.", "persons to whom the # Software is furnished to do", "OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.", "files (the \"Software\"), # to deal in the Software without", "parameters and will retrieve all fixedClassifier resources from the server.", "without restriction, including without limitation # the rights to use,", "encapsulates a list of fixedClassifier resources that are managed by", "server has encountered an uncategorized error condition \"\"\" return self._read(href)", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "MIT LICENSE # # Copyright 1997 - 2020 by IXIA", "@property def Pattern(self): \"\"\" Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance", "\"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): \"\"\"Retrieves a single", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "to any person obtaining a copy # of this software", "------- - self: This instance with matching fixedClassifier resources retrieved", "of the Software, and to permit persons to whom the", "to apply this profile to. If there are multiple patterns", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if self._properties.get('Pattern', None) is not None: return", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "distribute, sublicense, # and/or sell copies of the Software, and", "value starts with ^ and ends with $ By default", "FixedClassifier class encapsulates a list of fixedClassifier resources that are", "resources retrieved from the server available through an iterator or", "fixedClassifier resources in this instance from the server. Raises ------", "error condition \"\"\" self._delete() def find(self): \"\"\"Finds and retrieves fixedClassifier", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "None) is not None: return self._properties.get('Pattern') else: return Pattern(self) def", "locals())) def remove(self): \"\"\"Deletes all the contained fixedClassifier resources in", "retrieved Returns ------- - self: This instance with the fixedClassifier", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "resources in this instance from the server. Raises ------ -", "substantial portions of the Software. # # THE SOFTWARE IS", "and will retrieve all fixedClassifier resources from the server. Returns", "of fixedClassifier data from the server. Args ---- - href", "merge, publish, distribute, sublicense, # and/or sell copies of the", "do so, subject to the following conditions: # # The", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "Files from typing import List, Any, Union class FixedClassifier(Base): \"\"\"Specifies", "------ - ServerError: The server has encountered an uncategorized error", "iterator or index Raises ------ - ServerError: The server has", "data from the server. Args ---- - href (str): An", "IXIA Keysight # # Permission is hereby granted, free of", "resources from the server. Returns ------- - self: This instance", "href (str): An href to the instance to be retrieved", "server and adds it to the container. 
Returns ------- -", "to the instance to be retrieved Returns ------- - self:", "that are managed by the user. A list of resources", "are managed by the user. A list of resources can", "any person obtaining a copy # of this software and", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "can be retrieved from the server using the FixedClassifier.find() method.", "uhd_restpy.base import Base from uhd_restpy.files import Files from typing import", "the server. Returns ------- - self: This instance with matching", "of fixedClassifier resources that are managed by the user. A", "shall be included in # all copies or substantial portions", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "in this instance from the server. Raises ------ - NotFoundError:", "of this software and associated documentation files (the \"Software\"), #", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "and the newly added fixedClassifier resources available through an iterator", "match ensure the parameter value starts with ^ and ends", "the server available through an iterator or index Raises ------", "the Software without restriction, including without limitation # the rights", "by using the FixedClassifier.add() and FixedClassifier.remove() methods. \"\"\" __slots__ =", "is not None: return self._properties.get('Pattern') else: return Pattern(self) def add(self):", "not None: return self._properties.get('Pattern') else: return Pattern(self) def add(self): \"\"\"Adds", "encountered an uncategorized error condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def", "permit persons to whom the # Software is furnished to", "all packets in order to be impaired by this profile.", "copyright notice and this permission notice shall be included in", "def __init__(self, parent, list_op=False): super(FixedClassifier, self).__init__(parent, list_op) @property def Pattern(self):", "- self: This instance with the fixedClassifier resources from the", "server. All named parameters are evaluated on the server using", "= () _SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP = { } _SDM_ENUM_MAP", "this instance from the server. Raises ------ - NotFoundError: The", "the server using regex. The named parameters can be used", "} _SDM_ENUM_MAP = { } def __init__(self, parent, list_op=False): super(FixedClassifier,", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "free of charge, to any person obtaining a copy #", "use, copy, modify, merge, publish, distribute, sublicense, # and/or sell", "if self._properties.get('Pattern', None) is not None: return self._properties.get('Pattern') else: return", "retrieve all fixedClassifier resources from the server. Returns ------- -", "available through an iterator or index Raises ------ - NotFoundError:", "server - ServerError: The server has encountered an uncategorized error", "# and/or sell copies of the Software, and to permit", "Pattern class Raises ------ - ServerError: The server has encountered", "managed by using the FixedClassifier.add() and FixedClassifier.remove() methods. \"\"\" __slots__", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "and adds it to the container. Returns ------- - self:", "{ } _SDM_ENUM_MAP = { } def __init__(self, parent, list_op=False):", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "class encapsulates a list of fixedClassifier resources that are managed", "instance of the Pattern class Raises ------ - ServerError: The", "with the fixedClassifier resources from the server available through an", "return self._properties.get('Pattern') else: return Pattern(self) def add(self): \"\"\"Adds a new", "parameters are evaluated on the server using regex. The named", "Args ---- - href (str): An href to the instance", "copy, modify, merge, publish, distribute, sublicense, # and/or sell copies", "error condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): \"\"\"Retrieves", "limitation # the rights to use, copy, modify, merge, publish,", "self._properties.get('Pattern') else: return Pattern(self) def add(self): \"\"\"Adds a new fixedClassifier", "does not exist on the server - ServerError: The server", "match all packets in order to be impaired by this", "be impaired by this profile. The FixedClassifier class encapsulates a", "None: return self._properties.get('Pattern') else: return Pattern(self) def add(self): \"\"\"Adds a", "Returns ------- - self: This instance with matching fixedClassifier resources", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "remove(self): \"\"\"Deletes all the contained fixedClassifier resources in this instance", "fixedClassifier resources from the server. All named parameters are evaluated", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "An instance of the Pattern class Raises ------ - ServerError:", "resources from the server. All named parameters are evaluated on", "has encountered an uncategorized error condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))", "method. The list can be managed by using the FixedClassifier.add()", "def Pattern(self): \"\"\" Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern.Pattern): An instance of", "\"\"\"Finds and retrieves fixedClassifier resources from the server. All named", "# to deal in the Software without restriction, including without", "an uncategorized error condition \"\"\" self._delete() def find(self): \"\"\"Finds and", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "resources that are managed by the user. A list of", "uncategorized error condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if self._properties.get('Pattern',", "list_op=False): super(FixedClassifier, self).__init__(parent, list_op) @property def Pattern(self): \"\"\" Returns -------", "1997 - 2020 by IXIA Keysight # # Permission is", "IN # THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files", "fixedClassifier resources from the server. 
Returns ------- - self: This", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "- 2020 by IXIA Keysight # # Permission is hereby", "ServerError: The server has encountered an uncategorized error condition \"\"\"", "_SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP = { } _SDM_ENUM_MAP = {", "from uhd_restpy.base import Base from uhd_restpy.files import Files from typing", "each packet must match all packets in order to be", "The list can be managed by using the FixedClassifier.add() and", "Copyright 1997 - 2020 by IXIA Keysight # # Permission", "charge, to any person obtaining a copy # of this", "__init__(self, parent, list_op=False): super(FixedClassifier, self).__init__(parent, list_op) @property def Pattern(self): \"\"\"", "retrieves fixedClassifier resources from the server. All named parameters are", "error condition \"\"\" from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if self._properties.get('Pattern', None)", "FixedClassifier.find() method. The list can be managed by using the", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "can be managed by using the FixedClassifier.add() and FixedClassifier.remove() methods.", "above copyright notice and this permission notice shall be included", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "requested resource does not exist on the server - ServerError:", "all currently retrieved fixedClassifier resources using find and the newly", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "this permission notice shall be included in # all copies", "If there are multiple patterns enabled, they are ANDed: each", "instance from the server. Raises ------ - NotFoundError: The requested", "and ends with $ By default the find method takes", "from the server using the FixedClassifier.find() method. The list can", "on the server - ServerError: The server has encountered an", "A list of resources can be retrieved from the server", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "in order to be impaired by this profile. The FixedClassifier", "= 'fixedClassifier' _SDM_ATT_MAP = { } _SDM_ENUM_MAP = { }", "resource does not exist on the server - ServerError: The", "retrieved fixedClassifier resources using find and the newly added fixedClassifier", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "---- - href (str): An href to the instance to", "server. Raises ------ - NotFoundError: The requested resource does not", "find method takes no parameters and will retrieve all fixedClassifier", "server has encountered an uncategorized error condition \"\"\" self._delete() def", "is furnished to do so, subject to the following conditions:", "the Pattern class Raises ------ - ServerError: The server has", "copies or substantial portions of the Software. # # THE", "ensure the parameter value starts with ^ and ends with", "href): \"\"\"Retrieves a single instance of fixedClassifier data from the", "single instance of fixedClassifier data from the server. Args ----", "all fixedClassifier resources from the server. Returns ------- - self:", "contained fixedClassifier resources in this instance from the server. Raises", "List, Any, Union class FixedClassifier(Base): \"\"\"Specifies the packets to apply", "an uncategorized error condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self,", "\"\"\"Specifies the packets to apply this profile to. 
If there", "through an iterator or index Raises ------ - ServerError: The", "from the server available through an iterator or index Raises", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "FixedClassifier.remove() methods. \"\"\" __slots__ = () _SDM_NAME = 'fixedClassifier' _SDM_ATT_MAP", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "from uhd_restpy.testplatform.sessions.ixnetwork.impairment.profile.fixedclassifier.pattern.pattern import Pattern if self._properties.get('Pattern', None) is not None:", "the server. Raises ------ - NotFoundError: The requested resource does", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #", "the user. A list of resources can be retrieved from", "def remove(self): \"\"\"Deletes all the contained fixedClassifier resources in this", "to the following conditions: # # The above copyright notice", "# MIT LICENSE # # Copyright 1997 - 2020 by", "regex. The named parameters can be used to selectively retrieve", "FixedClassifier.add() and FixedClassifier.remove() methods. \"\"\" __slots__ = () _SDM_NAME =", "resources available through an iterator or index Raises ------ -", "and FixedClassifier.remove() methods. \"\"\" __slots__ = () _SDM_NAME = 'fixedClassifier'", "adds it to the container. Returns ------- - self: This", "------- - self: This instance with all currently retrieved fixedClassifier", "THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import Files", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "through an iterator or index Raises ------ - NotFoundError: The", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "order to be impaired by this profile. The FixedClassifier class", "be managed by using the FixedClassifier.add() and FixedClassifier.remove() methods. \"\"\"", "has encountered an uncategorized error condition \"\"\" self._delete() def find(self):", "from the server. To retrieve an exact match ensure the", "apply this profile to. If there are multiple patterns enabled,", "def find(self): \"\"\"Finds and retrieves fixedClassifier resources from the server.", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "= { } def __init__(self, parent, list_op=False): super(FixedClassifier, self).__init__(parent, list_op)", "no parameters and will retrieve all fixedClassifier resources from the", "An href to the instance to be retrieved Returns -------", "to. If there are multiple patterns enabled, they are ANDed:", "FixedClassifier(Base): \"\"\"Specifies the packets to apply this profile to. If", "encountered an uncategorized error condition \"\"\" self._delete() def find(self): \"\"\"Finds", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "used to selectively retrieve fixedClassifier resources from the server. To", "The requested resource does not exist on the server -", "container. Returns ------- - self: This instance with all currently", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "parameter value starts with ^ and ends with $ By", "to use, copy, modify, merge, publish, distribute, sublicense, # and/or", "fixedClassifier resources from the server available through an iterator or", "SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "from the server. 
Raises ------ - NotFoundError: The requested resource", "return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self): \"\"\"Deletes all the contained fixedClassifier", "granted, free of charge, to any person obtaining a copy", "------- - self: This instance with the fixedClassifier resources from", "import List, Any, Union class FixedClassifier(Base): \"\"\"Specifies the packets to", "not exist on the server - ServerError: The server has", "obtaining a copy # of this software and associated documentation", "server has encountered an uncategorized error condition \"\"\" return self._select(self._map_locals(self._SDM_ATT_MAP,", "this profile to. If there are multiple patterns enabled, they", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "publish, distribute, sublicense, # and/or sell copies of the Software,", "The server has encountered an uncategorized error condition \"\"\" return", "self._delete() def find(self): \"\"\"Finds and retrieves fixedClassifier resources from the", "copy # of this software and associated documentation files (the", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "Permission is hereby granted, free of charge, to any person", "find and the newly added fixedClassifier resources available through an", "Software without restriction, including without limitation # the rights to", "def add(self): \"\"\"Adds a new fixedClassifier resource on the server", "The above copyright notice and this permission notice shall be", "uncategorized error condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self): \"\"\"Deletes", "in the Software without restriction, including without limitation # the", "NotFoundError: The requested resource does not exist on the server", "error condition \"\"\" return self._create(self._map_locals(self._SDM_ATT_MAP, locals())) def remove(self): \"\"\"Deletes all", "# the rights to use, copy, modify, merge, publish, distribute,", "be retrieved Returns ------- - self: This instance with the", "server. To retrieve an exact match ensure the parameter value", "named parameters are evaluated on the server using regex. The", "without limitation # the rights to use, copy, modify, merge,", "whom the # Software is furnished to do so, subject", "'fixedClassifier' _SDM_ATT_MAP = { } _SDM_ENUM_MAP = { } def", "sublicense, # and/or sell copies of the Software, and to", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "This instance with the fixedClassifier resources from the server available", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "it to the container. Returns ------- - self: This instance", "uhd_restpy.files import Files from typing import List, Any, Union class", "or index Raises ------ - NotFoundError: The requested resource does", "enabled, they are ANDed: each packet must match all packets", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "using regex. The named parameters can be used to selectively", "resources can be retrieved from the server using the FixedClassifier.find()" ]
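# A minimal usage sketch of the container above (not part of the generated
# module, and the helper name is illustrative only). The `profile` argument is
# assumed to be an impairment profile node obtained from an already-connected
# uhd_restpy / IxNetwork session; creating that session is outside this file.
def list_and_refresh_classifiers(profile):
    classifiers = profile.FixedClassifier.find()    # all fixedClassifier resources (regex-filterable)
    for classifier in classifiers:                  # the returned container is iterable / indexable
        print(classifier)
    new_classifier = profile.FixedClassifier.add()  # create a resource on the server, kept in the container
    pattern = new_classifier.Pattern                # child Pattern container exposed by the property above
    new_classifier.remove()                         # delete the contained resource(s) again
    return pattern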
[ "ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) print('clear fault buf %s,", "name, address, reset=False): # Instrument.__init__(self, name, tags=['physical']) # self._port_name =", "# self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC self._currentwl =", "types.FloatType, # units = 'nm', # minval=1070.0,maxval=1180.0) # self.add_function('open') #", "self._currentwl \"\"\" Not sure what this is doing yet \"\"\"", "eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]", "name, address, reset=False) __del__(self) get_bit(self, byteval,idx) _u32todouble(self, uinput) open(self) close(self)", "return \"\"\" I have absolutely no idea what the hell", "ctypes.wintypes import DWORD, WORD import numpy as np \"\"\" okay", "ctypes.wintypes.BOOL pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())", "the minimum and maximum wavelengths for the motor self._minwl =", "mantissa # print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if", "= 1 elif Sacher_EPOS.get_bit(uinput, 7) == True: exp_sign = -1", "are being defined in this class: check(self) before wreck(self) ok", "the function # VCS_GetErrorInfo doesn't exist! # Get operation mode,", "value under square root sign -- something is wrong') if", "(stored position offset) # from the device's \"homposition\" object self._offset", "[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetOperationMode(self._keyhandle,", "% mantissa # print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1", "version that works But only in the lab32 virtual environment", "# More hardcoded values StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(1)", "\"\\\\python.exe\", derp + \"\\\\delegate.py\"], stdout=PIPE, cwd=derp) # atexit.register(p.terminate) # port", "+ ' EPOS motor enabled, disabling before proceeding.') ret =", "ctypes.wintypes.WORD(8204) for subidx, coeff in enumerate([a, b, c]): print(subidx, coeff)", "current_pos = epos.get_motor_position() # print('current position is {}'.format(current_pos)) # new_pos", "(int(mantissa_bit) << 31) + (int(c * 1e6) << 8) +", "'exp extract %s' % bin(int(uinput & exp_mask)) # print 'exp", "+ ' Negative value under square root sign -- something", "print('') print(\"setting coefficients...\") nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_SetObject.argtypes", "print('Step 6b... 
diff wavelength') # self.set_target_position(diff_wavelength_offset, False, True) \"\"\"WRONG\"\"\" self.set_target_position(wavelength_to_pos,", "logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding') else: logging.error(__name__", "# get sign of exponent if Sacher_EPOS.get_bit(uinput, 7) == False:", "print('Movement state is %s' % pMovementState.contents.value) if pMovementState.contents.value == 1:", "= ctypes.c_uint8(1) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray =", "= False self._HPM = True # self.add_parameter('wavelength', # flags =", "exp_mask) output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput", "# print('Step 5...') # print('#4 Motor current: {}'.format(self.get_motor_current())) if 1", "At any rate there doesn't seem to be a lot", "# print 'close device returned %s' % buf if int(buf.contents.value)", "# VCS_GetErrorInfo doesn't exist! # Get operation mode, check if", "= eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf)) # Now get", "output position and the stored offset # print('Step 4...') diff_wavelength_offset", "open(self): eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype =", "screw to loosen causes large gold base to tighten decreasing", "= ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData =", "is %d' % self.get_motor_position()) # print('check #5') # print(nchecks) #", "current_motor_pos = self.get_motor_position() # Step 2: Get the motor offset", "pImmediately, ctypes.byref(buf)) # print('#7 Motor current: {}'.format(self.get_motor_current())) # print('#7 Motor", "+ self._doubleB * 5000.0 + self._doubleC # logging.error(__name__ + '", "# floating point conversion in the sacher VIs # get", "state ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) # print('check #6') #", "that's talking to the epos dll \"\"\" HISTCHAN = 65536", "none of them are explained way to go dc you", "if __name__ == '__main__': epos = Sacher_EPOS(None, b'COM3') # epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860)", "as np \"\"\" okay so we import a bunch of", "the initialize function so I wonder what initialize(self) is doing", "= self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0", "5000.0 + self._doubleC # logging.error(__name__ + ' Sacher wavelength calibration", "ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf)) # print('Enable state ret %s", "state is %s' % pMovementState.contents.value) if pMovementState.contents.value == 1: break", "FLAG_OVERFLOW = 0x0040 FLAG_FIFOFULL = 0x0003 # in mV ZCMIN", "get the motor position (stored position offset) # from the", "0x0003 # in mV ZCMIN = 0 ZCMAX = 20", "ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) if int(plsenabled.value) != 0:", "the actual motor position # print('Getting motor position') current_motor_pos =", "m.SacherLasaTeknique() # print(libc.vcs()) # eposlib = ctypes.windll.eposcmd eposlib = ctypes.windll.LoadLibrary('C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll')", "new_motor_pos = self.get_motor_position() # print('New motor position is %s' %", "# epos.find_home() # epos.restore() # time.sleep(7) 
epos.do_set_wavelength(1151.5) # epos.do_get_wavelength() print('Motor", "* 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))", "Not sure what this is doing yet \"\"\" def do_get_wavelength(self):", "ret = ctypes.wintypes.HANDLE() # print 'types are all %s %s", "wavelength') # self.set_target_position(diff_wavelength_offset, False, True) \"\"\"WRONG\"\"\" self.set_target_position(wavelength_to_pos, True, True) \"\"\"this", "ctypes.create_string_buffer(64) # eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0)", "np.int16(self._coefD >> 16) secondHalf = np.int16(self._coefD & 0xffff) # Set", "return output \"\"\" ok dc gave some slight explanations here", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL pMode_setting = ctypes.c_int8(1) ret = eposlib.VCS_SetOperationMode(self._keyhandle,", "ctypes.c_uint8(1) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32", "at this point \"\"\" if __name__ == '__main__': epos =", "mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput &", "is doing yet \"\"\" def initialize(self): nodeID = ctypes.wintypes.WORD(0) buf", "pos5000: x = b2a + np.sqrt(sqrtarg) print(b2a) print(np.sqrt(sqrtarg)) # print('Position", "motor self._minwl = float(firstHalf) / 10.0 self._maxwl = float(secondHalf) /", "return CastedObjectData[0] \"\"\" Not sure what this is doing yet", "= ctypes.wintypes.DWORD(0) Counts = WORD(512) # incremental encoder counts in", "% uinput # print 'type uin %s' % type(uinput) #", "StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 *", "1000 # in mV PHR800LVMIN = -1600 PHR800LVMAX = 2400", "Now read the stored 'calculation parameters' eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "# print('#7 Motor current: {}'.format(self.get_motor_current())) # print('set motor position ret", "= ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) if int(plsenabled.value) !=", "= 0x0040 FLAG_FIFOFULL = 0x0003 # in mV ZCMIN =", "position buf %s' % buf.value) steps_per_second = 14494.0 # hardcoded,", "move to the left when square is stuck in causes", "ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0) ret =", "self.get_offset() # print('Motor offset is %s' % self._offset) # Step", "exponent %s' % self.get_bit(uinput,7) # print 'binary constant is %s'", "= ctypes.wintypes.DWORD(0) ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf)) print('Homing: {}'.format(ret))", "self._doubleC self._currentwl = self._doubleA * ( self.get_motor_position()) ** 2.0 +", "# if mode is not 1, make it 1 if", "= ctypes.wintypes.HANDLE() # print 'types are all %s %s %s", "buf = ctypes.wintypes.DWORD(0) pMode = ctypes.pointer(ctypes.c_int8()) eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "# def get_bit(self, byteval,idx): return ((byteval & (1 << idx))", "to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefD = CastedObjectData[0] #", "them I'll comment about them as I go through them", "buf = ctypes.wintypes.DWORD(0) pPosition = ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = 
[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA = CastedObjectData[0] eposlib.VCS_GetObject.argtypes =", "epos.initialize() because there's an __init__ function which contains epos.initialize() \"\"\"", "Motor current: {}'.format(self.get_motor_current())) # print('#6 Motor current: {}'.format(self.get_motor_current())) # print('#6", "Motor current: {}'.format(self.get_motor_current())) if 1 == 2: print('uh-oh') # if", "# print('') time.sleep(0.01) # Now set disabled state ret =", "Get operation mode, check if it's 1 -- this is", "motor position new_motor_pos = self.get_motor_position() # print('New motor position is", "current: {}'.format(self.get_motor_current())) # print('#5 Motor current: {}'.format(self.get_motor_current())) # print('#5 Motor", "ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32", "else 0, uinput & exp_mask) output = mantissa * 2.0", "data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA = CastedObjectData[0]", "%s' % type(uinput) # print 'binary input is %s' %", "sure what this is doing yet \"\"\" def find_home(self): nodeID", "= float(secondHalf) / 10.0 # print 'first %s second %s'", "# print('Current motor position is %d' % self.get_motor_position()) # print('check", "(4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) /", "nodeID, pProfileVelocity, pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf)) # Now get the motor", "only self explanatory one it disconnects \"\"\" @staticmethod def get_bit(byteval,", "input is %s' % bin(long(uinput)) # get sign of exponent", "state ret %s buf %s' % (ret, buf.value)) # print('#6", "is %s' % bin((uinput & mantissa_mask) >> 8) mantissa =", "did not close Sacher EPOS motor correctly.') return \"\"\" Apparently", "the object data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32)) if", "\"\"\" def is_open(self): return self._is_open \"\"\" Not sure what this", "= ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) # print 'clear", "I'll have to worry about this later \"\"\" @staticmethod def", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))", "ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE buf = ctypes.pointer(DWORD(0))", "execute disconnect self.close() return \"\"\" this might be the only", "ret == 0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise", "PositionSensorType, ctypes.byref(buf)) ## if ret == int(0): ## print 'errr'", "ctypes.wintypes.BOOL(absolute) pImmediately = ctypes.wintypes.BOOL(immediately) eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long, ctypes.wintypes.BOOL,", "called LibC # m.register(\"SacherLasaTeknique\") # access and use libc #", "set the absolute position to true \"\"\" # self.set_target_position(10000, False,", "False, True) # Step 7: Get the actual motor position", "program -- I don't think # any documentation exists on", "== 0: logging.error(__name__ + ' Could not read stored position", "VCS_GetErrorInfo doesn't exist! 
# Get operation mode, check if it's", "(2.0 * self._doubleA) sqrtarg = self._doubleB ** 2.0 / (4.0", "sudo git a_clue \"\"\" @staticmethod def _u32todouble(uinput): # def _u32todouble(self,", "= ctypes.wintypes.BOOL(absolute) pImmediately = ctypes.wintypes.BOOL(immediately) eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,", "true \"\"\" # self.set_target_position(10000, False, True) # Step 7: Get", "this is doing yet \"\"\" def fine_tuning_steps(self, steps): current_motor_pos =", "(ctypes.c_uint32 * 1)(d) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))", "'uin u is %d' % uinput # print 'type uin", "motor. In use?') \"\"\" I mean to me this really", "use them but honestly I don't really get what this", "wavelength: causes the square to rotate left causes base to", "pos0 < pos5000: x = b2a + np.sqrt(sqrtarg) print(b2a) print(np.sqrt(sqrtarg))", "Sacher class thing let me just list here all the", "(0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC pos5000", "ABOVE HERE THIS IS THE STUPID THING THAT'S NOT WORKING!", "\"\"\" def get_offset(self): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_GetObject.argtypes", "% buf if int(buf.contents.value) >= 0: self._is_open = False else:", "b'RS232' \"\"\" Max on Max off but anyway it looks", "current_motor_pos = self.get_motor_position() self._offset = self.get_offset() self.set_target_position(steps, False, True) new_motor_pos", "hardcoded values StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(1) StoredPositionNbBytesToRead =", "what this is doing yet \"\"\" def fuck_my_life(self, wavelength): print('goddamn", "CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32)) if ret == 0: logging.error(__name__ +", "constant is %s' % bin(int(0b10000000000000000000000000000000)) mantissa_mask = 0b01111111111111111111111100000000 # mantissa_mask", "0x0040 FLAG_FIFOFULL = 0x0003 # in mV ZCMIN = 0", "# Step 1: Get the actual motor position # print('Getting", "print 'boolerrorinfo' ## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD] ## print", "here \"\"\" def __del__(self): # execute disconnect self.close() return \"\"\"", "+ self._doubleC # logging.error(__name__ + ' Sacher wavelength calibration polynomials", "self.get_motor_position()) # print('check #5') # print(nchecks) # print('') time.sleep(0.01) #", "of random stuff I always forget what ctypes is for", "position from Sacher EPOS motor') return CastedObjectData[0] \"\"\" Not sure", "(d - d_) # print('c:\\t{}\\td_:{}\\toriginal:\\t{}'.format(c, d_, c * 2 **", "nodeID, pMode, ctypes.byref(buf)) # if mode is not 1, make", "the object data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA", "def fine_tuning_steps(self, steps): current_motor_pos = self.get_motor_position() self._offset = self.get_offset() self.set_target_position(steps,", "new_motor_pos) # print('new offset is %s' % (new_motor_pos-current_motor_pos+self._offset)) self.set_new_offset(new_motor_pos -", "\"\"\" # from instrument import Instrument # import qt import", "int(abs(d_)) def open(self): eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(DWORD)]", "I got from the LabVIEW program -- I don't think", "& 0xffff) # Set the minimum and 
maximum wavelengths for", "= eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf)) # print('Enable state ret %s buf", "has a non-zero error code in it; the LabVIEW code", "x = b2a + np.sqrt(sqrtarg) print(b2a) print(np.sqrt(sqrtarg)) # print('Position is", "errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) buf =", "pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf)) # Now get the motor position (stored", "this point \"\"\" if __name__ == '__main__': epos = Sacher_EPOS(None,", "bin(int(uinput & exp_mask)) # print 'exp conv %s' % (exp_sign*int(uinput", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_FindHome(self._keyhandle,", "from ctypes.wintypes import DWORD, WORD import numpy as np \"\"\"", "the motor self._minwl = float(firstHalf) / 10.0 self._maxwl = float(secondHalf)", "print('Motor current: {}'.format(self.get_motor_current())) print('Motor position: {}'.format(self.get_motor_position())) # print('Motor offset: {}'.format(self.get_offset()))", "sign of position-to-wavelength pos0 = self._doubleA * (0.0) ** 2.0", "derp = \"C:\\\\Users\\\\Alex\\\\Documents\\\\wow_such_code\" # assert os.path.isdir(derp) # os.chdir(derp) # p", "self._offset = self.get_offset() # Now read the stored 'calculation parameters'", "square to rotate right causes base to move to the", "might be the only self explanatory one it disconnects \"\"\"", "import Instrument from ctypes.wintypes import DWORD, WORD import numpy as", "Not sure what this is doing yet \"\"\" def initialize(self):", "ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf)) print('Homing: {}'.format(ret)) return ret", "# print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))", "print('set motor position ret %s' % ret) # print('set motor", "restore(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype", "here all the functions that are being defined in this", "# print(b2a) # print(sqrtarg) # print(pos0) # print(pos5000) if sqrtarg", "wavelengths for the motor self._minwl = float(firstHalf) / 10.0 self._maxwl", "10.0 # print 'first %s second %s' % (firstHalf, secondHalf)", "nodeID, pPosition, ctypes.byref(buf)) # print 'get motor position ret %s'", "print(epos.get_motor_position()) # print('#2 Motor current: {}'.format(epos.get_motor_current())) # epos.find_home() # epos.restore()", "need to explicitly run epos.initialize() because there's an __init__ function", "self.set_target_position(10000, False, True) # Step 7: Get the actual motor", "Not sure what this is doing yet \"\"\" def is_open(self):", "# self.add_function('get_motor_position') # self.add_function('set_target_position') # try: self.open() self.initialize() # except:", "True) new_motor_pos = self.get_motor_position() # print('New motor position is %s'", "what this is doing yet \"\"\" def set_target_position(self, target, absolute,", "it appears that in the 2005 version of this DLL,", "pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD()) ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID,", "get_offset(self): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_GetObject.argtypes = 
[ctypes.wintypes.HANDLE,", "state ret %s buf %s' % (ret, buf.value)) # print('Final", "%s %s' % (self._coefA, self._coefB, self._coefC, self._coefD) self._doubleA = self._u32todouble(self._coefA)", "HISTCHAN = 65536 TTREADMAX = 131072 RANGES = 8 MODE_HIST", "ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten", "print('Homing: {}'.format(ret)) return ret \"\"\" Not sure what this is", "2 ** d_)) return (int(mantissa_bit) << 31) + (int(c *", "them but honestly I don't really get what this is", "# print('#7 Motor current: {}'.format(self.get_motor_current())) # print('#7 Motor current: {}'.format(self.get_motor_current()))", "3 FLAG_OVERFLOW = 0x0040 FLAG_FIFOFULL = 0x0003 # in mV", "0 # print('check #3') while nchecks < 1000: # get", "also damn there are 16 of them I'll comment about", "# in mV ZCMIN = 0 ZCMAX = 20 DISCRMIN", "** (float(exp_sign) * float(int(uinput & exp_mask))) # print 'output is", "pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB *", "\"\"\" I have absolutely no idea what the hell this", "set motor position') # print('Current motor position is %d' %", "forget what ctypes is for but I'll worry about it", "buf %s' % buf.value) steps_per_second = 14494.0 # hardcoded, estimated", "roughly, unused now nchecks = 0 # print('check #3') while", "calculate the roots b2a = -1.0 * self._doubleB / (2.0", "< 0.0: logging.error(__name__ + ' Negative value under square root", "1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >>", "eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten, ctypes.byref(buf)) print('Coefficients are", ">> 8) mantissa = 1.0 / 1000000.0 * float(mantissa_sign) *", "something? 
ah whatever I'll have to worry about this later", "wavelength # print('Current wavelength is %.3f' % self.do_get_wavelength()) # print('setting", "CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA = CastedObjectData[0] eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE,", "\"\"\" Possbily Maxon EPOS now \"\"\" \"\"\" This is the", "ctypes import ctypes.wintypes import logging import time # from instrument", "'mantissa is %.12f' % mantissa # print(1 if Sacher_EPOS.get_bit(uinput,31) else", "== '__main__': epos = Sacher_EPOS(None, b'COM3') # epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860) # epos.do_get_wavelength()", "set_new_offset(self, new_offset) get_motor_position(self) set_target_position(self, target, absolute, immediately) do_get_wavelength(self) do_set_wavelength(self, wavelength)", "# print 'exp extract %s' % bin(int(uinput & exp_mask)) #", "estimated roughly, unused now nchecks = 0 # print('check #3')", "there are 16 of them I'll comment about them as", "def set_new_offset(self, new_offset): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_SetObject.argtypes", "print('getting motor position...') # print(ret) # return print(pPosition.contents.value) \"\"\" Not", "0: self._is_open = True self._keyhandle = ret return \"\"\" I", "what this is doing yet \"\"\" def get_offset(self): nodeID =", "what ctypes is for but I'll worry about it later", "<< 8) + (int(exp_bit) << 7) + int(abs(d_)) def open(self):", "ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf)) # print 'get motor", "eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]", "eposlib = ctypes.windll.eposcmd eposlib = ctypes.windll.LoadLibrary('C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll') DeviceName = b'EPOS' ProtocolStackName", "data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefB = CastedObjectData[0]", "# print('check #2') # print('About to set motor position') #", "motor position is %d' % (self.get_motor_position())) ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID,", "0: a = -a # print('a:\\t{}\\tb:\\t{}'.format(a, b)) d = np.log2(10)", "print 'errr' ## errbuf = ctypes.create_string_buffer(64) ## print 'sending' ##", "= ctypes.create_string_buffer(64) ## print 'sending' ## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL ##", "in it; the LabVIEW code # doesn't check it. 
#", "d_ = np.ceil(d) c = a * 2 ** (d", "% (exp_sign*int(uinput & exp_mask)) # print 'sign of exponent %s'", "print 'done printer' if int(buf.contents.value) >= 0: self._is_open = True", "ret) # print('set motor position buf %s' % buf.value) #", "pos5000: x = b2a + np.sqrt(sqrtarg) # x is what", "'boolerrorinfo' ## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD] ## print 'arg'", "# print('#6 Motor current: {}'.format(self.get_motor_current())) # print('#6 Motor current: {}'.format(self.get_motor_current()))", "bin(long(uinput)) # get sign of exponent if Sacher_EPOS.get_bit(uinput, 7) ==", "%s' % buf.value # print 'get motor position value %s'", "ctypes.byref(buf)) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32", "parameters' eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL # print 'setting new offset' StoredPositionObject", "0: logging.error(__name__ + ' Could not write stored position from", "= self.get_motor_position() # Step 2: Get the motor offset self._offset", "return \"\"\" Apparently this closes the EPOS motor I don't", "10,000 steps # print('Step 5...') # print('#4 Motor current: {}'.format(self.get_motor_current()))", "StoredPositionObjectSubindex = ctypes.c_uint8(3) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray", "be the only self explanatory one it disconnects \"\"\" @staticmethod", "the desired wavelength into a position # Check sign of", "x) wavelength_to_pos = int(round(x)) # Step 4: Calculate difference between", "ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # Get coefficient", "weird/non-standard U32 to floating point conversion in the sacher VIs\"", "set_target_position(self, target, absolute, immediately): # print('check #1') nodeID = ctypes.wintypes.WORD(0)", "sign -- something is wrong') if pos0 > pos5000: #", "This is the actual version that works But only in", "= ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(2) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData =", "ret return \"\"\" I have absolutely no idea what the", "print('check #1') nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) # First,", "thing that's talking to the epos dll \"\"\" HISTCHAN =", "print('check #6') # print('Disable state ret %s buf %s' %", "EPOS motor correctly.') return \"\"\" Apparently this closes the EPOS", "and maximum wavelengths for the motor self._minwl = float(firstHalf) /", "list here all the functions that are being defined in", "= Instrument.FLAG_GETSET, # type = types.FloatType, # units = 'nm',", "move to the right when square is stuck in causes", "self.do_get_wavelength()) # print('setting wavelength done') return \"\"\" Not sure what", "this later \"\"\" @staticmethod def _doubletou32(dinput): mantissa_bit = 0 if", "dinput / 10 ** b if dinput < 0: a", "yet \"\"\" def is_open(self): return self._is_open \"\"\" Not sure what", "'err' ## raise ValueError(errbuf.value) # For some reason, it appears", "coefficient D eposlib.VCS_GetObject.argtypes = 
class Sacher_EPOS():
    """
    OK, before I dive into this giant Sacher class thing, let me just list here
    all the functions that are being defined in this class:
    check(self) before wreck(self) -- ok, but actually:
    __init__(self, name, address, reset=False)
    __del__(self)
    get_bit(self, byteval, idx)
    _u32todouble(self, uinput)
    open(self)
    close(self)
    get_offset(self)
    fine_tuning_steps(self, steps)
    set_new_offset(self, new_offset)
    get_motor_position(self)
    set_target_position(self, target, absolute, immediately)
    do_get_wavelength(self)
    do_set_wavelength(self, wavelength)
    is_open(self)
    clear_fault(self)
    initialize(self)
    The last one is really long, and also, damn, there are 16 of them.
    I'll comment about them as they come.
    """

    def __init__(self, name, address, reset=False):
        # Instrument.__init__(self, name, tags=['physical'])
        # self._port_name = str(address)
        self._port_name = address
        self._is_open = False
        self._HPM = True
        # self.add_parameter('wavelength',
        #                    flags=Instrument.FLAG_GETSET,
        #                    type=types.FloatType,
        #                    units='nm',
        #                    minval=1070.0, maxval=1180.0)
        # self.add_function('open')
        # self.add_function('close')
        # self.add_function('fine_tuning_steps')
        # self.add_function('get_motor_position')
        # self.add_function('set_target_position')
        # try:
        self.open()
        self.initialize()
        # except:
        #     logging.error('Error loading Sacher EPOS motor. In use?')

    """
    To me this really looks like the initialize function,
    so I wonder what initialize(self) is doing.
    At any rate, there doesn't seem to be a lot going on here.
    """

    def __del__(self):
        # execute disconnect
        self.close()
        return

    """
    This might be the only self-explanatory one: it disconnects.
    """

    @staticmethod
    def get_bit(byteval, idx):
        # def get_bit(self, byteval, idx):
        return ((byteval & (1 << idx)) != 0)

    """
    You get the bits, and then you use them --
    but honestly I don't really get what this is doing.
    sudo git a_clue
    """

    @staticmethod
    def _u32todouble(uinput):
        # def _u32todouble(self, uinput):
        # This function implements the really weird/non-standard U32 to
        # floating point conversion in the Sacher VIs.
        # Get the sign of the number.
        sign = Sacher_EPOS.get_bit(uinput, 31)
        if sign == False:
            mantissa_sign = 1
        elif sign == True:
            mantissa_sign = -1
        exp_mask = 0b111111
        # Get the sign of the exponent.
        if Sacher_EPOS.get_bit(uinput, 7) == False:
            exp_sign = 1
        elif Sacher_EPOS.get_bit(uinput, 7) == True:
            exp_sign = -1
        # print 'exp extract %s' % bin(int(uinput & exp_mask))
        # print 'exp mult %s' % (exp_sign * int(uinput & exp_mask))
        # print 'sign of exponent %s' % Sacher_EPOS.get_bit(uinput, 7)
        mantissa_mask = 0b01111111111111111111111100000000
        # print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)
        mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
        # print 'mantissa is %.12f' % mantissa
        output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
        # print 'output is %s' % output
        return output

    """
    OK, dc gave some slight explanations here.
    Apparently there's a "really weird/non-standard U32 to floating point
    conversion in the sacher VIs".
    It'd be gr8 if I knew what U32's were -- unsigned 32-bit something something?
    Ah whatever, I'll have to worry about this later.
    """

    @staticmethod
    def _doubletou32(dinput):
        # Inverse of the conversion above: pack a double into the Sacher U32 format.
        mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else 1
        exp_bit = 1 if -1 < dinput < 1 else 0
        b = np.ceil(np.log10(np.abs(dinput)))
        a = dinput / 10 ** b
        if dinput < 0:
            a = -a
        # print('a:\t{}\tb:\t{}'.format(a, b))
        d = np.log2(10) * b
        d_ = np.ceil(d)
        c = a * 2 ** (d - d_)
        # print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
        return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
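
    # Not part of the original driver: a minimal sanity-check sketch for the two
    # custom conversions above, assuming _doubletou32 and _u32todouble are meant
    # to be (approximate) inverses of each other. The name _check_u32_roundtrip
    # is made up here.
    @staticmethod
    def _check_u32_roundtrip(value, rel_tol=1e-4):
        encoded = Sacher_EPOS._doubletou32(value)
        decoded = Sacher_EPOS._u32todouble(encoded)
        # the mantissa is only stored to ~1e-6, so allow a loose relative tolerance
        return abs(decoded - value) <= rel_tol * abs(value)

    # e.g. Sacher_EPOS._check_u32_roundtrip(8.49218e-5) should come back True for
    # coefficients in the range this driver actually deals with.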
    def open(self):
        eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
                                           ctypes.c_char_p, ctypes.POINTER(DWORD)]
        eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
        buf = ctypes.pointer(DWORD(0))
        ret = ctypes.wintypes.HANDLE()
        # print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName),
        #                                         type(InterfaceName), type(self._port_name), type(buf))
        ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
        self._keyhandle = ret
        # print 'keyhandle is %s' % self._keyhandle
        # print 'open device ret %s' % buf
        # print buf.contents.value
        if int(buf.contents.value) >= 0:
            self._is_open = True
            self._keyhandle = ret
        return

    """
    I have absolutely no idea what the hell this is doing.
    Considering that close(self) is apparently closing the EPOS motor,
    maybe this is opening it.
    """

    def close(self):
        print('closing EPOS motor.')
        eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
        eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
        buf = ctypes.pointer(DWORD(0))
        ret = ctypes.wintypes.BOOL()
        ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
        if int(buf.contents.value) >= 0:
            self._is_open = False
        else:
            logging.error(__name__ + ' did not close Sacher EPOS motor correctly.')
        return

    """
    Apparently this closes the EPOS motor.
    I don't know what "opening" and "closing" the motor means though,
    and yeah, also these random variables don't make any sense to me.
    """

    def is_open(self):
        return self._is_open

    def get_motor_current(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                             ctypes.POINTER(ctypes.c_uint8),
                                             ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
        motorCurrent = ctypes.c_uint8(0)
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
        return motorCurrent.value
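
    # Not part of the original driver: a sketch of the error-handling pattern that
    # clear_fault() and initialize() below keep repeating (call a VCS_* function,
    # and if it returns 0, pull the error text out of the DLL and raise). The
    # helper name _raise_on_error is made up here; note the comment in
    # initialize() that VCS_GetErrorInfo may not even exist in the 2005 DLL.
    def _raise_on_error(self, ret, buf):
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)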
    def get_offset(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        # These are hardcoded values I got from the LabVIEW program -- I don't think
        # any documentation exists on particular object indices.
        StoredPositionObject = ctypes.wintypes.WORD(8321)
        StoredPositionObjectSubindex = ctypes.c_uint8(0)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to int32
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
        if ret == 0:
            logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
        return CastedObjectData[0]

    def set_new_offset(self, new_offset):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
        # print 'setting new offset'
        StoredPositionObject = ctypes.wintypes.WORD(8321)
        StoredPositionObjectSubindex = ctypes.c_uint8(0)
        StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                    ctypes.byref(buf))
        if ret == 0:
            logging.error(__name__ + ' Could not write stored position to Sacher EPOS motor')
        return

    def fine_tuning_steps(self, steps):
        current_motor_pos = self.get_motor_position()
        self._offset = self.get_offset()
        self.set_target_position(steps, False, True)
        new_motor_pos = self.get_motor_position()
        # print('New motor position is %s' % new_motor_pos)
        # print('new offset is %s' % (new_motor_pos - current_motor_pos + self._offset))
        self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
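
    # Not part of the original driver: a sketch that factors out the VCS_GetObject
    # boilerplate used by get_offset() above and by initialize() further down.
    # The helper name _read_object_u32 and the 'signed' flag are made up here; the
    # object index/subindex values themselves come from the LabVIEW program.
    def _read_object_u32(self, index, subindex, signed=False):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ctype = ctypes.c_int32 if signed else ctypes.c_uint32
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctype))
        NbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, ctypes.wintypes.WORD(index),
                                    ctypes.c_uint8(subindex), ObjectData, ctypes.wintypes.DWORD(4),
                                    NbBytesRead, ctypes.byref(buf))
        if ret == 0:
            logging.error(__name__ + ' Could not read object %d:%d from Sacher EPOS motor' % (index, subindex))
        return ObjectData[0]

    # e.g. self._read_object_u32(8321, 0, signed=True) would read the stored offset,
    # the same value get_offset() returns.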
    def get_motor_position(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        pPosition = ctypes.pointer(ctypes.c_long())
        eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                              ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
        # print 'get motor position ret %s' % ret
        # print 'get motor position buf %s' % buf.value
        # print 'get motor position value %s' % pPosition.contents.value
        return pPosition.contents.value

    def set_target_position(self, target, absolute, immediately):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        # First, set enabled state
        ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
        # print('Enable state ret %s buf %s' % (ret, buf.value))
        pTarget = ctypes.c_long(target)
        pAbsolute = ctypes.wintypes.BOOL(absolute)
        pImmediately = ctypes.wintypes.BOOL(immediately)
        eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
                                               ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
                                               ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
        # print('About to set motor position')
        # print('Current motor position is %d' % (self.get_motor_position()))
        ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))
        # print('set motor position ret %s' % ret)
        # print('set motor position buf %s' % buf.value)
        pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
        # print('Getting movement state')
        ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
        nchecks = 0  # limit hardcoded, estimated roughly
        while nchecks < 1000:
            # Get the movement state; a movement state of 1 indicates the motor
            # is done moving.
            # print('Motor current: {}'.format(self.get_motor_current()))
            # print('Motor position: {}'.format(self.get_motor_position()))
            ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
            # print('Movement state is %s' % pMovementState.contents.value)
            if pMovementState.contents.value == 1:
                break
            nchecks = nchecks + 1
            # print('Current motor position is %d' % self.get_motor_position())
            # print(nchecks)
            time.sleep(0.01)
        # Now set disabled state
        ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
        # print('Disable state ret %s buf %s' % (ret, buf.value))
        # print('Final motor position is %d' % (self.get_motor_position()))
        return ret
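
    # Not part of the original driver: the busy-wait from set_target_position()
    # above, factored into its own sketch. The name _wait_until_stopped and the
    # max_checks/interval parameters are made up here; the original hardcodes
    # 1000 checks at 10 ms.
    def _wait_until_stopped(self, max_checks=1000, interval=0.01):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
        for _ in range(max_checks):
            eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
            # a movement state of 1 means the motor is done moving
            if pMovementState.contents.value == 1:
                return True
            time.sleep(interval)
        return False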
    def do_get_wavelength(self):
        self._offset = self.get_offset()
        # self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
        self._currentwl = self._doubleA * (self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC
        print('Current wavelength: %.3f nm' % self._currentwl)
        return self._currentwl

    def do_set_wavelength(self, wavelength):
        print('setting wavelength...')
        # print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        # Step 1: Get the actual motor position
        current_motor_pos = self.get_motor_position()
        # Step 2: Get the motor offset
        self._offset = self.get_offset()
        # print('Motor offset is %s' % self._offset)
        # Step 3: Convert the desired wavelength into a position.
        # Check the sign of the position-to-wavelength calibration first.
        pos0 = self._doubleA * 0.0 ** 2.0 + self._doubleB * 0.0 + self._doubleC
        pos5000 = self._doubleA * 5000.0 ** 2.0 + self._doubleB * 5000.0 + self._doubleC
        # logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
        # If that's OK, use the quadratic formula to calculate the roots.
        b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
        sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
        if sqrtarg < 0.0:
            logging.error(__name__ + ' Negative sqrt sign -- something is wrong')
        if pos0 > pos5000:
            # Take the - square root solution
            x = b2a - np.sqrt(sqrtarg)
        elif pos0 < pos5000:
            # Take the + square root solution
            x = b2a + np.sqrt(sqrtarg)
        # x is what the motor position should be
        # print('Position is %s' % x)
        wavelength_to_pos = int(round(x))
        # Step 4: Calculate the difference between the target position and the stored offset
        diff_wavelength_offset = wavelength_to_pos - int(self._offset)
        print('wavelength_to_pos: {}'.format(wavelength_to_pos))
        print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
        print('self._offset: {}'.format(int(self._offset)))
        # Step 5: If HPM is activated and the wavelength position is lower, overshoot
        # the movement by 10,000 steps
        if 1 == 2:
            print('uh-oh')
            # if self._HPM and diff_wavelength_offset < 0:
            #     print('Overshooting by 10000')
            #     self.set_target_position(diff_wavelength_offset - 10000, False, True)
            #     # Step 6: Set the real target position
            #     self.set_target_position(10000, False, True)
        else:
            # print('Step 6a... diff wavelength')
            # self.set_target_position(diff_wavelength_offset, False, True)
            """WRONG"""
            self.set_target_position(wavelength_to_pos, True, True)
            """this is the real shit right here
            I need to set the absolute position to true"""
        # Step 7: Get the actual motor position and update the stored offset
        new_motor_pos = self.get_motor_position()
        # print('New motor position is %s' % new_motor_pos)
        self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
        # Step 8: get and print the current wavelength
        # print('Current wavelength is %.3f' % self.do_get_wavelength())
        # print('setting wavelength done')
        return
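
    # Not part of the original driver: the wavelength <-> position math from
    # do_set_wavelength() above, pulled out as a pure helper so it can be checked
    # without a motor attached. The name _wavelength_to_position is made up here;
    # A, B, C are the calibration polynomial coefficients (wl = A*pos**2 + B*pos + C)
    # that initialize() reads back from the controller.
    @staticmethod
    def _wavelength_to_position(wavelength, A, B, C):
        b2a = -1.0 * B / (2.0 * A)
        sqrtarg = B ** 2.0 / (4.0 * A ** 2.0) - (C - wavelength) / A
        if sqrtarg < 0.0:
            raise ValueError('wavelength %s is outside the calibrated range' % wavelength)
        # Pick the root on the same branch as do_set_wavelength(): if the polynomial
        # decreases with position, take the '-' root, otherwise the '+' root.
        pos0 = C
        pos5000 = A * 5000.0 ** 2.0 + B * 5000.0 + C
        if pos0 > pos5000:
            x = b2a - np.sqrt(sqrtarg)
        else:
            x = b2a + np.sqrt(sqrtarg)
        return int(round(x))

    # e.g. Sacher_EPOS._wavelength_to_position(1151.5, 8.34529e-12, 8.49218e-5, 1081.92)
    # (the coefficients from the commented-out set_coeffs call in __main__) gives the
    # position this driver would target for 1151.5 nm.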
    def clear_fault(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
        print('clear fault buf %s, ret %s' % (buf, ret))
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)

    def find_home(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                         ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
        return ret

    def restore(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_Restore.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                        ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_Restore.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
        print('Restore: {}'.format(ret))
        return ret

    def set_coeffs(self, a, b, c, min_wl, max_wl):
        print('')
        print("setting coefficients...")
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
        # The wavelength range is packed into a single u32: min in the high word,
        # max in the low word (in Angstroms; initialize() divides by 10 to get nm).
        d = (min_wl << 16) + max_wl
        # Same hardcoded 'calculation parameters' object that initialize() reads back.
        StoredPositionObject = ctypes.wintypes.WORD(8204)
        for subidx, coeff in enumerate([a, b, c]):
            print(subidx, coeff)
            StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
            StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
            ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
            ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
            StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
            ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                        ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                        ctypes.byref(buf))
        StoredPositionObjectSubindex = ctypes.c_uint8(4)
        StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)(d)
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                    ctypes.byref(buf))
        print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        if ret == 0:
            logging.error(__name__ + ' Could not write calibration coefficients to Sacher EPOS motor')
        return

    def fuck_my_life(self, wavelength):
        print('goddamn this piece of shit')
        print('')
        print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        return
    def initialize(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        BaudRate = DWORD(38400)
        Timeout = DWORD(100)
        ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))
        # print 'set protocol buf %s ret %s' % (buf, ret)
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            # eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)

        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
        # print 'clear fault buf %s, ret %s' % (buf, ret)
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)

        buf = ctypes.wintypes.DWORD(0)
        plsenabled = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))
        # print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)
        if int(plsenabled.value) != 0:
            logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
            ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
            if int(ret) != 0:
                logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
            else:
                logging.error(__name__ + ' EPOS motor was not successfully disabled!')

        buf = ctypes.wintypes.DWORD(0)
        Counts = WORD(512)  # incremental encoder counts in pulses per turn
        PositionSensorType = WORD(4)
        ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf))
        ## if ret == int(0):
        ##     errbuf = ctypes.create_string_buffer(64)
        ##     eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
        ##     eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
        ##     ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
        ##     raise ValueError(errbuf.value)
        # For some reason, it appears normal in the LabVIEW code that this
        # function actually returns an error, i.e. the return value is zero
        # and the buffer has a non-zero error code in it; the LabVIEW code
        # doesn't check it.
        # Also, it appears that in the 2005 version of this DLL, the function
        # VCS_GetErrorInfo doesn't exist!

        # Get the operation mode and check if it's 1 -- this is "profile position mode".
        buf = ctypes.wintypes.DWORD(0)
        pMode = ctypes.pointer(ctypes.c_int8())
        eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                 ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
        # If the mode is not 1, make it 1.
        if pMode.contents.value != 1:
            eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                     ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
            pMode_setting = ctypes.c_int8(1)
            ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))

        eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                   ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
        pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
        pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
        pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
        ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
                                             pProfileDeceleration, ctypes.byref(buf))
        print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value)
        if (int(pProfileVelocity.contents.value) > int(11400)
                or int(pProfileAcceleration.contents.value) > int(60000)
                or int(pProfileDeceleration.contents.value) > int(60000)):
            eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                       ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
                                                       ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
            pProfileVelocity = ctypes.wintypes.DWORD(429)
            pProfileAcceleration = ctypes.wintypes.DWORD(429)
            pProfileDeceleration = ctypes.wintypes.DWORD(429)
            logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
            ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
                                                 pProfileDeceleration, ctypes.byref(buf))

        # Now get the motor position (stored position offset)
        # from the device's "homposition" object.
        self._offset = self.get_offset()

        # Now read the stored 'calculation parameters'.
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        # More hardcoded values: object 8204, subindices 1-4 hold coefficients A-D.
        coefs = []
        for subidx in (1, 2, 3, 4):
            StoredPositionObject = ctypes.wintypes.WORD(8204)
            StoredPositionObjectSubindex = ctypes.c_uint8(subidx)
            StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
            ObjectDataArray = (ctypes.c_uint32 * 1)()
            ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
            StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
            ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                        ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                        ctypes.byref(buf))
            # Cast the object data to uint32
            CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
            coefs.append(CastedObjectData[0])
        self._coefA, self._coefB, self._coefC, self._coefD = coefs
        # print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
        self._doubleA = self._u32todouble(self._coefA)
        self._doubleB = self._u32todouble(self._coefB)
        self._doubleC = self._u32todouble(self._coefC)
        firstHalf = np.int16(self._coefD >> 16)
        secondHalf = np.int16(self._coefD & 0xffff)
        # Set the minimum and maximum wavelengths for the motor.
        self._minwl = float(firstHalf) / 10.0
        self._maxwl = float(secondHalf) / 10.0
        # print 'first %s second %s' % (firstHalf, secondHalf)
        # This returns '10871' and '11859' for the Sacher, which are the correct
        # wavelength ranges in Angstroms.
        # Now calculate the current wavelength position:
        self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
        print('Current wavelength: %.3f nm' % self._currentwl)
        print('initializing done')
        return True
"mantissa_mask = 0b0111111111111111111111110000000 # print 'mantissa extract is %s' %", "variables don't make any sense to me \"\"\" def get_motor_current(self):", "# print 'printing' # print buf.contents.value # print 'done printer'", "Get coefficient B StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(2) StoredPositionNbBytesToRead", "errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) \"\"\" Not", "what this is doing yet \"\"\" def set_coeffs(self, a, b,", "0: logging.error(__name__ + ' Could not read stored position from", "print 'keyhandle is %s' % self._keyhandle # print 'open device", "= eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf)) # print('#7 Motor", "object self._offset = self.get_offset() # Now read the stored 'calculation", "this is doing yet \"\"\" def do_get_wavelength(self): self._offset = self.get_offset()", "dll \"\"\" HISTCHAN = 65536 TTREADMAX = 131072 RANGES =", "self._currentwl = self._doubleA * ( self.get_motor_position()) ** 2.0 + self._doubleB", "= eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) # print 'clear fault buf %s,", "operation mode, check if it's 1 -- this is \"profile", "current: {}'.format(self.get_motor_current())) # print('set motor position ret %s' % ret)", "& exp_mask) output = mantissa * 2.0 ** (float(exp_sign) *", "2.0 + self._doubleB * 0.0 + self._doubleC pos5000 = self._doubleA", "on particular object indices StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(4)", "(ctypes.c_uint32 * 1)() ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))", "= ctypes.wintypes.BOOL ## print 'boolerrorinfo' ## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p,", "# print('check #4') # print('Motor current: {}'.format(self.get_motor_current())) print('Motor position: {}'.format(self.get_motor_position()))", "6a... 
diff wavelength') # # self.set_target_position(10000, False, True) else: #", "'type uin %s' % type(uinput) # print 'binary input is", "= ctypes.pointer(ctypes.c_int8()) eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype =", "%.12f' % mantissa # print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa,", "print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB,", "ctypes.wintypes.WORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL pProfileVelocity =", "all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName),", "14494.0 # hardcoded, estimated roughly, unused now nchecks = 0", "%s' % bin(long(uinput)) # get sign of exponent if Sacher_EPOS.get_bit(uinput,", "to true \"\"\" # self.set_target_position(10000, False, True) # Step 7:", "return print(pPosition.contents.value) \"\"\" Not sure what this is doing yet", "wavelength_to_pos - int(self._offset) print('wavelength_to_pos: {}'.format(wavelength_to_pos)) print('diff_wavelength_offset: {}'.format(diff_wavelength_offset)) print('self._offset: {}'.format(int(self._offset))) \"\"\"", "# m = BaseManager(address=(\"localhost\", port), authkey=authkey) # m.connect() # tell", "= 0 OFFSETMAX = 1000000000 # in ms ACQTMIN =", "eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf) self._keyhandle = ret # print", "# print('#3 Motor current: {}'.format(self.get_motor_current())) nodeID = ctypes.wintypes.WORD(0) buf =", "position is %d' % (self.get_motor_position())) ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget,", "unplug the motor Also you don't need to explicitly run", "\"\"\" # from subprocess import Popen, PIPE # from multiprocessing.managers", "def fuck_my_life(self, wavelength): print('goddamn this piece of shit') print('') print('Coefficients", "* 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask))) # print", "address, reset=False): # Instrument.__init__(self, name, tags=['physical']) # self._port_name = str(address)", "ret %s and en %s' % (buf, ret, plsenabled) if", "## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64)) ## print 'err' ##", "(buf, ret)) if ret == 0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf,", "\"\"\" Not sure what this is doing yet \"\"\" def", "import numpy as np \"\"\" okay so we import a", "steps) set_new_offset(self, new_offset) get_motor_position(self) set_target_position(self, target, absolute, immediately) do_get_wavelength(self) do_set_wavelength(self,", "buf = ctypes.wintypes.DWORD(0) # First, set enabled state # print('#5", "self.get_offset() self.set_target_position(steps, False, True) new_motor_pos = self.get_motor_position() # print('New motor", "floating point conversion in the sacher VIs\" It'd be gr8", "\"\"\" wooooooo a bunch a variables and none of them", "is \"profile position mode\" buf = ctypes.wintypes.DWORD(0) pMode = ctypes.pointer(ctypes.c_int8())", "pos0 > pos5000: # Take the + square root solution", "# in ms ACQTMIN = 1 ACQTMAX = 10 *", "ret %s' % (buf, ret) if ret == 0: errbuf", "self._offset) # Step 3: Convert the desired wavelength into a", "[ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)] eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL buf = ctypes.pointer(DWORD(0)) ret =", "check(self) 
before wreck(self) ok but actually: __init__(self, name, address, reset=False)", "(self._doubleA, self._doubleB, self._doubleC)) if ret == 0: logging.error(__name__ + '", "to me \"\"\" def get_motor_current(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes =", "ret = ctypes.wintypes.BOOL() ret = eposlib.VCS_CloseDevice(self._keyhandle, buf) # print 'close", "# epos.do_get_wavelength() # print('motor position is...') # current_pos = epos.get_motor_position()", "I always forget what ctypes is for but I'll worry", "buf %s, ret %s' % (buf, ret)) if ret ==", "LabVIEW program -- I don't think # any documentation exists", "0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) \"\"\"", "to move to the right when square is stuck in", "Get the motor offset self._offset = self.get_offset() # print('Motor offset", "successfully disabled, proceeding') else: logging.error(__name__ + ' EPOS motor was", "data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32)) if ret ==", "ret) # print('set motor position buf %s' % buf.value) steps_per_second", "ctypes.wintypes.DWORD(0) # First, set enabled state # print('#5 Motor current:", "self._offset = self.get_offset() # self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset +", "this DLL, the function # VCS_GetErrorInfo doesn't exist! # Get", "%s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name),", "ctypes.pointer(DWORD(0)) ret = ctypes.wintypes.BOOL() ret = eposlib.VCS_CloseDevice(self._keyhandle, buf) # print", "* b d_ = np.ceil(d) c = a * 2", "eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # These are hardcoded values I got", "= 3 FLAG_OVERFLOW = 0x0040 FLAG_FIFOFULL = 0x0003 # in", "10.0 self._maxwl = float(secondHalf) / 10.0 # print 'first %s", "= ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,", "ctypes.c_int8(1) ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf)) eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE,", "when square is stuck in causes screw to loosen causes", "ret # print 'keyhandle is %s' % self._keyhandle # print", "1: break nchecks = nchecks + 1 # print('Current motor", "StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten, ctypes.byref(buf)) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToWrite", "= b2a + np.sqrt(sqrtarg) print(b2a) print(np.sqrt(sqrtarg)) # print('Position is %s'", "new_offset): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE,", "motor position buf %s' % buf.value) # print('Movement state is", "# These are hardcoded values I got from the LabVIEW", "motor offset self._offset = self.get_offset() # print('Motor offset is %s'", "in Angstroms # print 'Now calculate the current wavelength position:'", "en %s' % (buf, ret, plsenabled) if ret == 0:", "Instrument.FLAG_GETSET, # type = types.FloatType, # units = 'nm', #", "ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE buf = ctypes.pointer(DWORD(0)) ret", "epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860) # epos.do_get_wavelength() # print('#1 Motor current: 
{}'.format(epos.get_motor_current())) # epos.do_get_wavelength()", "sense to me \"\"\" def get_motor_current(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes", "eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf)) # print('Enable state ret %s buf %s'", "doing yet \"\"\" \"\"\" Also we're done with the Sacher_EPOS()", "= [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE buf", "4...') diff_wavelength_offset = wavelength_to_pos - int(self._offset) # print('Diff wavelength offset", "if pMode.contents.value != 1: eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)]", "= ctypes.wintypes.DWORD(0) # First, set enabled state # print('#5 Motor", "WORD(64)) raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID,", "= self.get_offset() # self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC", "# print 'keyhandle is %s' % self._keyhandle # print 'open", "* 60 * 60 * 1000 # in mV PHR800LVMIN", "minimum and maximum wavelengths for the motor self._minwl = float(firstHalf)", "the sacher VIs # get sign of number sign =", "\"\"\" def get_motor_current(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "'11859' for the Sacher, which are the correct # wavelength", "%s %s' % (self._doubleA, self._doubleB, self._doubleC)) # print('#3 Motor current:", "what this is doing yet \"\"\" def set_new_offset(self, new_offset): nodeID", "ctypes.c_uint8(4) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32", "# print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7)", "ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) pPosition = ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE,", "{}'.format(self.get_motor_current())) print('Motor position: {}'.format(self.get_motor_position())) # print('Motor offset: {}'.format(self.get_offset())) self._offset =", "\"\"\" OTHER MISC. 
NOTES: increasing wavelength: causes the square to", "print('Motor offset: {}'.format(self.get_offset())) self._offset = self.get_offset() # print('Motor offset is", "InterfaceName = b'RS232' \"\"\" Max on Max off but anyway", "= a * 2 ** (d - d_) # print('c:\\t{}\\td_:{}\\toriginal:\\t{}'.format(c,", "ctypes.pointer(ctypes.wintypes.DWORD()) pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD()) ret = eposlib.VCS_GetPositionProfile(self._keyhandle,", "before wreck(self) ok but actually: __init__(self, name, address, reset=False) __del__(self)", "motor') return \"\"\" Not sure what this is doing yet", "* float(mantissa_sign) * float((uinput & mantissa_mask) >> 8) # print", "is doing yet \"\"\" def fine_tuning_steps(self, steps): current_motor_pos = self.get_motor_position()", "InterfaceName, self._port_name, buf) self._keyhandle = ret # print 'keyhandle is", "it later \"\"\" # from subprocess import Popen, PIPE #", ">= 0: self._is_open = False else: logging.error(__name__ + ' did", "we're done with the Sacher_EPOS() class at this point \"\"\"", "nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf)) return motorCurrent.value \"\"\" Not sure what this", "being defined in this class: check(self) before wreck(self) ok but", "# try: self.open() self.initialize() # except: # logging.error('Error loading Sacher", "buf.value)) # print('Final motor position is %d' % (self.get_motor_position())) #", "2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC -", "% ret) # print('set motor position buf %s' % buf.value)", "# print('Step 6b... diff wavelength') # self.set_target_position(diff_wavelength_offset, False, True) \"\"\"WRONG\"\"\"", "'get motor position buf %s' % buf.value # print 'get", "wavelength ranges in Angstroms # print 'Now calculate the current", "that close(self) is apparently closing the EPOS motor, maybe this", "# from instrument import Instrument # import qt import ctypes", "%s' % diff_wavelength_offset) # Step 5: If HPM is activated", "eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf)) # print('#7 Motor current:", "0 DISCRMAX = 800 # in ps OFFSETMIN = 0", "ZCMIN = 0 ZCMAX = 20 DISCRMIN = 0 DISCRMAX", "this is doing yet \"\"\" def fuck_my_life(self, wavelength): print('goddamn this", "< pos5000: x = b2a + np.sqrt(sqrtarg) print(b2a) print(np.sqrt(sqrtarg)) #", "is doing yet \"\"\" def set_coeffs(self, a, b, c, min_wl,", "steps # print('Step 5...') # print('#4 Motor current: {}'.format(self.get_motor_current())) if", "ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL pProfileVelocity = ctypes.wintypes.DWORD(429) pProfileAcceleration =", "position is %d' % (self.get_motor_position())) # print('check #7') return ret", "# self.add_function('fine_tuning_steps') # self.add_function('get_motor_position') # self.add_function('set_target_position') # try: self.open() self.initialize()", "Sacher_EPOS.get_bit(uinput, 31) if sign == False: mantissa_sign = 1 elif", "EPOS motor') return CastedObjectData[0] \"\"\" Not sure what this is", "b)) d = np.log2(10) * b d_ = np.ceil(d) c", "self._keyhandle = ret return \"\"\" I have absolutely no idea", "current wavelength # print('Current wavelength is %.3f' % self.do_get_wavelength()) #", "buf.value # print 'get motor position value %s' % pPosition.contents.value", "& exp_mask)) # print 'exp conv %s' % (exp_sign*int(uinput &", "this is doing yet \"\"\" def 
restore(self): nodeID = ctypes.wintypes.WORD(0)", "pMode_setting = ctypes.c_int8(1) ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf)) eposlib.VCS_GetPositionProfile.argtypes", "# print('Current motor position is %d' % (self.get_motor_position())) ret =", "idx): # def get_bit(self, byteval,idx): return ((byteval & (1 <<", "weird/non-standard U32 to # floating point conversion in the sacher", "ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # More hardcoded", "= ctypes.pointer(ctypes.wintypes.DWORD()) pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD()) ret =", "# from the device's \"homposition\" object self._offset = self.get_offset() #", "offset self._offset = self.get_offset() # print('Motor offset is %s' %", "The last one is really long And also damn there", "<< 16) + max_wl StoredPositionObject = ctypes.wintypes.WORD(8204) for subidx, coeff", "== 1: break nchecks = nchecks + 1 # print('Current", "StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 *", "done moving # print('') # print('check #4') # print('Motor current:", "doing yet \"\"\" def is_open(self): return self._is_open \"\"\" Not sure", "ctypes.wintypes.BOOL() ret = eposlib.VCS_CloseDevice(self._keyhandle, buf) # print 'close device returned", "# tell manager to expect an attribute called LibC #", "= self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset", "** d_)) return (int(mantissa_bit) << 31) + (int(c * 1e6)", "ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject,", "ctypes.wintypes.BOOL # More hardcoded values StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex =", "self._coefC, self._coefD) self._doubleA = self._u32todouble(self._coefA) self._doubleB = self._u32todouble(self._coefB) self._doubleC =", "@staticmethod def _doubletou32(dinput): mantissa_bit = 0 if int(dinput / abs(dinput))", "eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL ret", "I knew what U32's were unsigned 32 bit something something?", "get the bits, and then you use them but honestly", "# Step 5: If HPM is activated and the wavelength", "# Get operation mode, check if it's 1 -- this", "print('check #3') while nchecks < 1000: # get the movement", "base to loosen, and also unplug the motor Also you", "appears normal in the LabVIEW code that this # function", "the EPOS motor I don't know what \"opening\" and \"closing\"", "output \"\"\" ok dc gave some slight explanations here Apparently", "in the lab32 virtual environment \"\"\" # from instrument import", "__init__(self, name, address, reset=False): # Instrument.__init__(self, name, tags=['physical']) # self._port_name", "import DWORD, WORD import numpy as np \"\"\" okay so", "= eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf)) print('Homing: {}'.format(ret)) return ret \"\"\"", "Not sure what this is doing yet \"\"\" def get_motor_position(self):", "sure what this is doing yet \"\"\" def 
get_offset(self): nodeID", "(ret, buf.value)) # print('#6 Motor current: {}'.format(self.get_motor_current())) # print('#6 Motor", "nodeID, ctypes.byref(buf)) if int(ret) != 0: logging.warning(__name__ + ' EPOS", "Get coefficient D eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p,", "incremental encoder counts in pulses per turn PositionSensorType = WORD(4)", "pMode.contents.value != 1: eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetOperationMode.restype", "ctypes.byref(buf)) # print('Enable state ret %s buf %s' % (ret,", "BaudRate, Timeout, ctypes.byref(buf)) # print 'set protocol buf %s ret", "= ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD()) ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity,", "motor position') # print('Current motor position is %d' % (self.get_motor_position()))", "+ self._doubleC print('Current wavelength: %.3f nm' % self._currentwl) print('initializing done')", "Motor current: {}'.format(epos.get_motor_current())) # epos.find_home() # epos.restore() # time.sleep(7) epos.do_set_wavelength(1151.5)", "'10871' and '11859' for the Sacher, which are the correct", "1)(new_offset) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret =", "stuck in causes screw to tighten causes large gold base", "ctypes.c_uint8(0) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32", "32 bit something something? ah whatever I'll have to worry", "These are hardcoded values I got from the LabVIEW program", "print('check #2') # print('About to set motor position') # print('Current", "is activated and the wavelength position is lower, overshoot #", "'Now calculate the current wavelength position:' self._currentwl = self._doubleA *", "it \"\"\" def close(self): print('closing EPOS motor.') eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE,", "%s' % (buf, ret) if ret == 0: errbuf =", "self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB *", "extract %s' % bin(int(uinput & exp_mask)) # print 'exp conv", "#3') while nchecks < 1000: # get the movement state.", "are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName),", "[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetObject.restype =", "= eposlib.VCS_CloseDevice(self._keyhandle, buf) # print 'close device returned %s' %", "Get the actual motor position # print('Getting motor position') current_motor_pos", "True) # print(epos.get_motor_position()) # print('#2 Motor current: {}'.format(epos.get_motor_current())) # epos.find_home()", "Sacher EPOS motor correctly.') return \"\"\" Apparently this closes the", "steps_per_second = 14494.0 # hardcoded, estimated roughly, unused now nchecks", "ctypes.byref(buf)) # print 'get motor position ret %s' % ret", "nodeID, ctypes.byref(buf)) # print 'clear fault buf %s, ret %s'", "\"\"\" def fuck_my_life(self, wavelength): print('goddamn this piece of shit') print('')", "actually: __init__(self, name, address, reset=False) __del__(self) get_bit(self, 
byteval,idx) _u32todouble(self, uinput)", "the stored offset # print('Step 4...') diff_wavelength_offset = wavelength_to_pos -", "print 'open device ret %s' % buf # print 'printing'", "< dinput < 1 else 0 b = np.ceil(np.log10(abs(dinput))) a", "eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL ret", "# print('#5 Motor current: {}'.format(self.get_motor_current())) ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))", "[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL motorCurrent = ctypes.c_uint8(0)", "# libc = m.SacherLasaTeknique() # print(libc.vcs()) # eposlib = ctypes.windll.eposcmd", "ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) print('clear", "= ctypes.wintypes.DWORD(0) ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf)) # print", "for but I'll worry about it later \"\"\" # from", "eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) if int(plsenabled.value) != 0: logging.warning(__name__", "bin(int(0b10000000000000000000000000000000)) mantissa_mask = 0b01111111111111111111111100000000 # mantissa_mask = 0b0111111111111111111111110000000 # print", "print 'sending' ## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL ## print 'boolerrorinfo' ##", "= ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))", "2: print('uh-oh') # if self._HPM and diff_wavelength_offset < 0: #", "[ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE buf =", "doing yet \"\"\" def restore(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes =", "motor position ret %s' % ret # print 'get motor", "self._doubleA) sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA", "wavelength is %.3f' % self.do_get_wavelength()) # print('setting wavelength done') return", "(self.get_motor_position())) # print('check #7') return ret \"\"\" Not sure what", "ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten, ctypes.byref(buf)) print('Coefficients are %s %s %s' %", "buf = ctypes.wintypes.DWORD(0) Counts = WORD(512) # incremental encoder counts", "pMode = ctypes.pointer(ctypes.c_int8()) eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype", "of them are explained way to go dc you da", "git a_clue \"\"\" @staticmethod def _u32todouble(uinput): # def _u32todouble(self, uinput):", "# For some reason, it appears normal in the LabVIEW", "ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32 * 1)() ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))", "pProfileVelocity, pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf)) # Now get the motor position", "ctypes is for but I'll worry about it later \"\"\"", "correctly.') return \"\"\" Apparently this closes the EPOS motor I", "doing yet \"\"\" def get_offset(self): nodeID = ctypes.wintypes.WORD(0) buf =", "zero # and the buffer has a non-zero error code", 
"= m.SacherLasaTeknique() # print(libc.vcs()) # eposlib = ctypes.windll.eposcmd eposlib =", "os.chdir(python32_dir) # derp = \"C:\\\\Users\\\\Alex\\\\Documents\\\\wow_such_code\" # assert os.path.isdir(derp) # os.chdir(derp)", "defined in this class: check(self) before wreck(self) ok but actually:", "the motor offset self._offset = self.get_offset() # print('Motor offset is", "1 else 0 b = np.ceil(np.log10(abs(dinput))) a = dinput /", "particular object indices StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(3) StoredPositionNbBytesToRead", "eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL motorCurrent", "do_set_wavelength(self, wavelength): print('setting wavelength...') print('') # print('Coefficients are %s %s", "print('check #5') # print(nchecks) # print('') time.sleep(0.01) # Now set", "int(pProfileAcceleration.contents.value) > int( 60000) or int(pProfileDeceleration.contents.value) > int(60000)): eposlib.VCS_GetPositionProfile.argtypes =", "16 of them I'll comment about them as I go", "ret == 0: logging.error(__name__ + ' Could not write stored", "# print('#6 Motor current: {}'.format(self.get_motor_current())) pTarget = ctypes.c_long(target) pAbsolute =", "= ctypes.wintypes.DWORD(429) logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')", "works But only in the lab32 virtual environment \"\"\" #", "doing yet \"\"\" def fuck_my_life(self, wavelength): print('goddamn this piece of", "D eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),", "ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID,", "functions that are being defined in this class: check(self) before", "sign of exponent if Sacher_EPOS.get_bit(uinput, 7) == False: exp_sign =", "= (ctypes.c_uint32 * 1)(self._doubletou32(coeff)) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten =", "= ctypes.wintypes.BOOL # print 'setting new offset' StoredPositionObject = ctypes.wintypes.WORD(8321)", "{}'.format(epos.get_motor_position())) \"\"\" OTHER MISC. NOTES: increasing wavelength: causes the square", "environment \"\"\" # from instrument import Instrument # import qt", "-1600 PHR800LVMAX = 2400 \"\"\" wooooooo a bunch a variables", "* (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC", "For some reason, it appears normal in the LabVIEW code", "I don't think # any documentation exists on particular object", "offset # print('Step 4...') diff_wavelength_offset = wavelength_to_pos - int(self._offset) #", "% self._offset) # Step 3: Convert the desired wavelength into", "%s' % output return output \"\"\" ok dc gave some", "ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(2) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p()", "something something? 
ah whatever I'll have to worry about this", "in mV ZCMIN = 0 ZCMAX = 20 DISCRMIN =", "Not sure what this is doing yet \"\"\" \"\"\" Also", "# print 'mantissa is %.12f' % mantissa # print(1 if", "# new_pos = current_pos + 10000 # epos.set_target_position(new_pos, True, True)", "and yeah also these random variables don't make any sense", "the roots b2a = -1.0 * self._doubleB / (2.0 *", "= ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefD = CastedObjectData[0] # print 'coefficients are", "offset is %s' % self._offset) # Step 3: Convert the", "__del__(self): # execute disconnect self.close() return \"\"\" this might be", "ret == 0: logging.error(__name__ + ' Could not read stored", "self.get_motor_position() self._offset = self.get_offset() self.set_target_position(steps, False, True) new_motor_pos = self.get_motor_position()", "authkey=authkey) # m.connect() # tell manager to expect an attribute", "and the buffer has a non-zero error code in it;", "wavelength...') print('') # print('Coefficients are %s %s %s' % (self._doubleA,", "# Now get the motor position (stored position offset) #", "and then you use them but honestly I don't really", "\"\"\" # self.set_target_position(10000, False, True) # Step 7: Get the", "class: check(self) before wreck(self) ok but actually: __init__(self, name, address,", "sure what this is doing yet \"\"\" def set_coeffs(self, a,", "os.chdir(derp) # p = Popen([python32_dir + \"\\\\python.exe\", derp + \"\\\\delegate.py\"],", "np.ceil(d) c = a * 2 ** (d - d_)", "\"\"\" # # #print('Step 6a... diff wavelength') # # self.set_target_position(10000,", "movement state of 1 indicates the motor # is done", "= eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf)) # print 'get motor position", "'clear fault buf %s, ret %s' % (buf, ret) if", "RANGES = 8 MODE_HIST = 0 MODE_T2 = 2 MODE_T3", "# print 'output is %s' % output return output \"\"\"", "= WORD(512) # incremental encoder counts in pulses per turn", "buf if int(buf.contents.value) >= 0: self._is_open = False else: logging.error(__name__", "= ctypes.windll.eposcmd eposlib = ctypes.windll.LoadLibrary('C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll') DeviceName = b'EPOS' ProtocolStackName =", "have absolutely no idea what the hell this is doing", "%s' % self.get_bit(uinput,7) # print 'binary constant is %s' %", "## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD] ## print 'arg' ##", "%s' % bin(int(uinput & exp_mask)) # print 'exp conv %s'", "ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL # print('Getting movement state')", "def get_bit(byteval, idx): # def get_bit(self, byteval,idx): return ((byteval &", "causes the square to rotate right causes base to move", "# \"\"\" # # #print('Step 6a... 
diff wavelength') # #", "ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject,", "coefficients...\") nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE,", "#6') # print('Disable state ret %s buf %s' % (ret,", "exponent if Sacher_EPOS.get_bit(uinput, 7) == False: exp_sign = 1 elif", "10 * 60 * 60 * 1000 # in mV", "[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetPositionIs(self._keyhandle,", "# m.connect() # tell manager to expect an attribute called", "this giant Sacher class thing let me just list here", "Considering that close(self) is apparently closing the EPOS motor, maybe", "ProtocolStackName = b'MAXON_RS232' InterfaceName = b'RS232' \"\"\" Max on Max", "* self._doubleB / (2.0 * self._doubleA) sqrtarg = self._doubleB **", "= Sacher_EPOS(None, b'COM3') # epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860) # epos.do_get_wavelength() # print('#1 Motor", "# print('motor position is...') # current_pos = epos.get_motor_position() # print('current", "dive into this giant Sacher class thing let me just", "% buf # print 'printing' # print buf.contents.value # print", "def get_bit(self, byteval,idx): return ((byteval & (1 << idx)) !=", "eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long, ctypes.wintypes.BOOL, ctypes.wintypes.BOOL, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_MoveToPosition.restype =", "Motor current: {}'.format(self.get_motor_current())) # print('#6 Motor current: {}'.format(self.get_motor_current())) pTarget =", "+ max_wl StoredPositionObject = ctypes.wintypes.WORD(8204) for subidx, coeff in enumerate([a,", "= BaseManager(address=(\"localhost\", port), authkey=authkey) # m.connect() # tell manager to", "# HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE", "by 10000') # # self.set_target_position(diff_wavelength_offset - 10000, False, True) #", "class at this point \"\"\" if __name__ == '__main__': epos", "False self._HPM = True # self.add_parameter('wavelength', # flags = Instrument.FLAG_GETSET,", "b, c]): print(subidx, coeff) StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1) StoredPositionNbBytesToWrite", "** 2.0 + self._doubleB * 0.0 + self._doubleC pos5000 =", "sure what this is doing yet \"\"\" def do_get_wavelength(self): self._offset", "of exponent %s' % self.get_bit(uinput,7) # print 'binary constant is", "If HPM is activated and the wavelength position is lower,", "\"closing\" the motor means though and yeah also these random", "'sending' ## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL ## print 'boolerrorinfo' ## eposlib.VCS_GetErrorInfo.argtypes", "new_pos = current_pos + 10000 # epos.set_target_position(new_pos, True, True) #", "%s %s %s' % (self._doubleA, self._doubleB, self._doubleC)) if ret ==", "% (buf, ret)) if ret == 0: errbuf = ctypes.create_string_buffer(64)", "gold base to loosen, and also unplug the motor Also", "motor # is done moving # print('') # print('check #4')", "doing yet \"\"\" def clear_fault(self): nodeID = ctypes.wintypes.WORD(0) buf =", "\"\"\"this is the real shit right here I need to", "doing yet \"\"\" def set_coeffs(self, a, b, c, min_wl, max_wl):", "b if dinput < 0: a = -a # 
print('a:\\t{}\\tb:\\t{}'.format(a,", "self._doubleA = self._u32todouble(self._coefA) self._doubleB = self._u32todouble(self._coefB) self._doubleC = self._u32todouble(self._coefC) firstHalf", "position is lower, overshoot # the movement by 10,000 steps", "'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC,", "print('#6 Motor current: {}'.format(self.get_motor_current())) pTarget = ctypes.c_long(target) pAbsolute = ctypes.wintypes.BOOL(absolute)", "in pulses per turn PositionSensorType = WORD(4) ret = eposlib.VCS_SetEncoderParameter(self._keyhandle,", ">> 8) # print 'mantissa is %.12f' % mantissa #", "position ret %s' % ret) # print('set motor position buf", "print(nchecks) # print('') time.sleep(0.01) # Now set disabled state ret", "StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(d) ObjectData =", "'printing' # print buf.contents.value # print 'done printer' if int(buf.contents.value)", "b = np.ceil(np.log10(abs(dinput))) a = dinput / 10 ** b", "if int(buf.contents.value) >= 0: self._is_open = False else: logging.error(__name__ +", "motor, maybe this is opening it \"\"\" def close(self): print('closing", "out of bounds, resetting...') ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,", "# #print('Step 6a... diff wavelength') # # self.set_target_position(10000, False, True)", "Sacher_EPOS.get_bit(uinput, 7) == True: exp_sign = -1 # print 'exp", "nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype", "HPM is activated and the wavelength position is lower, overshoot", "successfully disabled!') buf = ctypes.wintypes.DWORD(0) Counts = WORD(512) # incremental", "self._doubleB = self._u32todouble(self._coefB) self._doubleC = self._u32todouble(self._coefC) firstHalf = np.int16(self._coefD >>", "= ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset) ObjectData = ctypes.cast(ObjectDataArray,", "print('Disable state ret %s buf %s' % (ret, buf.value)) #", "# print 'binary input is %s' % bin(long(uinput)) # get", "pMovementState.contents.value) if pMovementState.contents.value == 1: break nchecks = nchecks +", "raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))", "of position-to-wavelength pos0 = self._doubleA * (0.0) ** 2.0 +", "HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID", "print 'exp extract %s' % bin(int(uinput & exp_mask)) # print", "StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32 *", "1 ACQTMAX = 10 * 60 * 60 * 1000", "# self.add_function('close') # self.add_function('fine_tuning_steps') # self.add_function('get_motor_position') # self.add_function('set_target_position') # try:", "be a lot going on here \"\"\" def __del__(self): #", "<< 7) + int(abs(d_)) def open(self): eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p,", "d_) # print('c:\\t{}\\td_:{}\\toriginal:\\t{}'.format(c, d_, c * 2 ** d_)) return", "Not sure what this is doing yet \"\"\" def fine_tuning_steps(self,", "& (1 << idx)) != 0) \"\"\" you get the", "ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,", 
"{}'.format(self.get_offset())) self._offset = self.get_offset() # print('Motor offset is %s' %", "self._is_open = False else: logging.error(__name__ + ' did not close", "of 1 indicates the motor # is done moving #", "Not sure what this is doing yet \"\"\" def set_new_offset(self,", "If that's OK, use the quadratic formula to calculate the", "= eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf)) ## if ret ==", "in the 2005 version of this DLL, the function #", "uinput # print 'type uin %s' % type(uinput) # print", "self._coefB, self._coefC, self._coefD) self._doubleA = self._u32todouble(self._coefA) self._doubleB = self._u32todouble(self._coefB) self._doubleC", "print('motor position is...') # current_pos = epos.get_motor_position() # print('current position", "+ self._doubleB * self._offset + self._doubleC print('Current wavelength: %.3f nm'", "ctypes.c_char_p, ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE buf = ctypes.pointer(DWORD(0)) ret =", "# # #print('Step 6a... diff wavelength') # # self.set_target_position(10000, False,", "print('wavelength_to_pos: {}'.format(wavelength_to_pos)) print('diff_wavelength_offset: {}'.format(diff_wavelength_offset)) print('self._offset: {}'.format(int(self._offset))) \"\"\" Not sure what", "Sacher, which are the correct # wavelength ranges in Angstroms", "some slight explanations here Apparently there's a \"really weird/non-standard U32", "only in the lab32 virtual environment \"\"\" # from instrument", "Motor current: {}'.format(self.get_motor_current())) # print('#5 Motor current: {}'.format(self.get_motor_current())) ret =", "that in the 2005 version of this DLL, the function", "ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # These are hardcoded values", "name, tags=['physical']) # self._port_name = str(address) self._port_name = address self._is_open", "lot going on here \"\"\" def __del__(self): # execute disconnect", "ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p()", "ret %s buf %s' % (ret, buf.value)) # print('Final motor", "LabVIEW code that this # function actually returns an error,", "\"\"\" I mean to me this really seems like the", "correct # wavelength ranges in Angstroms # print 'Now calculate", "%s' % (exp_sign*int(uinput & exp_mask)) # print 'sign of exponent", "wavelength direction') # If that's OK, use the quadratic formula", "motor position should be # print('Position is %s' % x)", "ctypes.wintypes.DWORD(429) pProfileAcceleration = ctypes.wintypes.DWORD(429) pProfileDeceleration = ctypes.wintypes.DWORD(429) logging.warning(__name__ + '", "self._doubleC print('Current wavelength: %.3f nm' % self._currentwl) return self._currentwl \"\"\"", "StoredPositionNbBytesWritten, ctypes.byref(buf)) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray =", "Take the + square root solution x = b2a -", "except: # logging.error('Error loading Sacher EPOS motor. In use?') \"\"\"", "ctypes.windll.LoadLibrary('C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll') DeviceName = b'EPOS' ProtocolStackName = b'MAXON_RS232' InterfaceName = b'RS232'", "one is really long And also damn there are 16", "THIS IS THE STUPID THING THAT'S NOT WORKING! 
# \"\"\"", "idea what the hell this is doing Considering that close(self)", "np.sqrt(sqrtarg) elif pos0 < pos5000: x = b2a + np.sqrt(sqrtarg)", "units = 'nm', # minval=1070.0,maxval=1180.0) # self.add_function('open') # self.add_function('close') #", "is not 1, make it 1 if pMode.contents.value != 1:", "= 10 * 60 * 60 * 1000 # in", "ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) \"\"\" Not sure what", "is %d' % (self.get_motor_position())) ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute,", "print('new offset is %s' % (new_motor_pos-current_motor_pos+self._offset)) self.set_new_offset(new_motor_pos - current_motor_pos +", "print(pos0) # print(pos5000) if sqrtarg < 0.0: logging.error(__name__ + '", "'calculation parameters' eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,", "RIGHT ABOVE HERE THIS IS THE STUPID THING THAT'S NOT", "# print('setting wavelength done') return \"\"\" Not sure what this", "self._doubleC pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB", "logging.error(__name__ + ' Could not write stored position from Sacher", "any sense to me \"\"\" def get_motor_current(self): nodeID = ctypes.wintypes.WORD(0)", "position-to-wavelength pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB", "byteval,idx): return ((byteval & (1 << idx)) != 0) \"\"\"", "* self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA", "Get the actual motor position new_motor_pos = self.get_motor_position() # print('New", "exp_bit = 1 if -1 < dinput < 1 else", "= ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefC = CastedObjectData[0] # Get coefficient D", "print 'uin u is %d' % uinput # print 'type", "errbuf, WORD(64)) raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) plsenabled = ctypes.wintypes.DWORD(0)", "is doing At any rate there doesn't seem to be", "motorCurrent.value \"\"\" Not sure what this is doing yet \"\"\"", "returned %s' % buf if int(buf.contents.value) >= 0: self._is_open =", "\"\"\" ok before I dive into this giant Sacher class", "# print('#1 Motor current: {}'.format(epos.get_motor_current())) # epos.do_get_wavelength() # print('motor position", "break nchecks = nchecks + 1 # print('Current motor position", "square to rotate left causes base to move to the", "any documentation exists on particular object indices StoredPositionObject = ctypes.wintypes.WORD(8321)", "% (buf, ret) if ret == 0: errbuf = ctypes.create_string_buffer(64)", "= self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC self._currentwl = self._doubleA *", "= ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0)", "for the motor self._minwl = float(firstHalf) / 10.0 self._maxwl =", "yet \"\"\" def set_new_offset(self, new_offset): nodeID = ctypes.wintypes.WORD(0) buf =", "get_motor_current(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]", "# print 'type uin %s' % type(uinput) # print 'binary", "ctypes.wintypes.BOOL pMode_setting = ctypes.c_int8(1) ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))", "self.initialize() # except: # logging.error('Error loading Sacher EPOS motor. 
In", "EPOS motor.') eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)] eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL buf", "= ctypes.pointer(DWORD(0)) ret = ctypes.wintypes.HANDLE() # print 'types are all", "<<EMAIL>>, August 2014 # \"\"\" Possbily Maxon EPOS now \"\"\"", "wreck(self) ok but actually: __init__(self, name, address, reset=False) __del__(self) get_bit(self,", "1 exp_bit = 1 if -1 < dinput < 1", "that's OK, use the quadratic formula to calculate the roots", "_u32todouble(self, uinput): # this function implements the really weird/non-standard U32", "ctypes.wintypes.BOOL, ctypes.wintypes.BOOL, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL # print('check #2') #", "= ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_GetObject(self._keyhandle, nodeID,", "# m.register(\"SacherLasaTeknique\") # access and use libc # libc =", "a movement state of 1 indicates the motor # is", "/ abs(dinput)) > 0 else 1 exp_bit = 1 if", "what this is doing yet \"\"\" def find_home(self): nodeID =", "is %s' % (new_motor_pos-current_motor_pos+self._offset)) self.set_new_offset(new_motor_pos - current_motor_pos + self._offset) #", "buf %s' % (ret, buf.value)) # print('Final motor position is", "print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7) else", "7) + int(abs(d_)) def open(self): eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,", "nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "protocol buf %s ret %s' % (buf, ret) if ret", "self._doubleB * 0.0 + self._doubleC pos5000 = self._doubleA * (5000.0)", "% diff_wavelength_offset) # Step 5: If HPM is activated and", "ok dc gave some slight explanations here Apparently there's a", "here Apparently there's a \"really weird/non-standard U32 to floating point", "print('Diff wavelength offset %s' % diff_wavelength_offset) # Step 5: If", "self._u32todouble(self._coefA) self._doubleB = self._u32todouble(self._coefB) self._doubleC = self._u32todouble(self._coefC) firstHalf = np.int16(self._coefD", "opening it \"\"\" def close(self): print('closing EPOS motor.') eposlib.VCS_CloseDevice.argtypes =", "# if self._HPM and diff_wavelength_offset < 0: # # print('Overshooting", "new offset' StoredPositionObject = ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToWrite =", "print('New motor position is %s' % new_motor_pos) # print('new offset", "FLAG_FIFOFULL = 0x0003 # in mV ZCMIN = 0 ZCMAX", "find_home(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)]", "is doing yet \"\"\" def find_home(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes", "piece of shit') print('') print('Coefficients are %s %s %s' %", "nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf)) # print('#7 Motor current: {}'.format(self.get_motor_current()))", "# # self.set_target_position(diff_wavelength_offset - 10000, False, True) # # Step", "# Step 6: Set the real target position # #", "## print 'boolerrorinfo' ## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD] ##", "** 2.0 + self._doubleB * 
self.get_motor_position() + self._doubleC print('Current wavelength:", "\"\"\" def initialize(self): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) BaudRate", "'get motor position ret %s' % ret # print 'get", "the LabVIEW program -- I don't think # any documentation", "doing yet \"\"\" def find_home(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes =", "da fuuuu') # print(b2a) # print(sqrtarg) # print(pos0) # print(pos5000)", "max_wl StoredPositionObject = ctypes.wintypes.WORD(8204) for subidx, coeff in enumerate([a, b,", "# print 'sign of exponent %s' % self.get_bit(uinput,7) # print", "* 1)(self._doubletou32(coeff)) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret", "wavelength_to_pos - int(self._offset) # print('Diff wavelength offset %s' % diff_wavelength_offset)", "ctypes.byref(buf)) eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype", "ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32 * 1)() ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))", "%.3f' % self.do_get_wavelength()) # print('setting wavelength done') return \"\"\" Not", "before I dive into this giant Sacher class thing let", "# print('Final motor position is %d' % (self.get_motor_position())) # print('check", "epos.do_set_wavelength(1151.5) # epos.do_get_wavelength() print('Motor current: {}'.format(epos.get_motor_current())) print('Motor position: {}'.format(epos.get_motor_position())) \"\"\"", "pMode, ctypes.byref(buf)) # if mode is not 1, make it", "causes large gold base to loosen, and also unplug the", "buf) self._keyhandle = ret # print 'keyhandle is %s' %", "position # print('Getting motor position') current_motor_pos = self.get_motor_position() # Step", "about this later \"\"\" @staticmethod def _doubletou32(dinput): mantissa_bit = 0", "= False else: logging.error(__name__ + ' did not close Sacher", "initialize(self) is doing At any rate there doesn't seem to", "from the LabVIEW program -- I don't think # any", "% buf.value # print 'get motor position value %s' %", "I dive into this giant Sacher class thing let me", "eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype =", "# from instrument import Instrument from ctypes.wintypes import DWORD, WORD", "print('#7 Motor current: {}'.format(self.get_motor_current())) # print('set motor position ret %s'", "I'll worry about it later \"\"\" # from subprocess import", "later \"\"\" # from subprocess import Popen, PIPE # from", "from subprocess import Popen, PIPE # from multiprocessing.managers import BaseManager", "steps): current_motor_pos = self.get_motor_position() self._offset = self.get_offset() self.set_target_position(steps, False, True)", "bounds, resetting...') ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf))", "# get the movement state. 
sacher_epos.py is a Python wrapper for the Sacher EPOS motor (the header notes it is "possibly Maxon EPOS now"), dated August 2014. It talks to the vendor's 32-bit EposCmd.dll through ctypes, so it has to run in a 32-bit Python environment; the comments refer to a dedicated "lab32" virtual environment. An earlier, abandoned workaround is still visible in commented-out code at the top of the file: spawn python.exe from the lab32 Miniconda environment running a delegate.py helper with subprocess.Popen, read a port number and authkey from its stdout, and connect to it through multiprocessing.managers.BaseManager (registering "SacherLasaTeknique") so that a 64-bit process could still reach the 32-bit DLL.

The live imports are ctypes, ctypes.wintypes, logging, time, DWORD and WORD from ctypes.wintypes, and numpy as np; the qtlab-style imports (instrument.Instrument, qt) are commented out. Next comes a block of module constants (HISTCHAN, TTREADMAX, RANGES, MODE_T2, MODE_T3, FLAG_OVERFLOW, ZCMIN/ZCMAX, DISCRMIN/DISCRMAX, OFFSETMIN/OFFSETMAX, ACQTMIN/ACQTMAX, PHR800LVMIN/PHR800LVMAX) that the original comments flag as unexplained; nothing in the motor class appears to reference them.

Module-level setup then loads the DLL and fixes the connection strings: eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll'), DeviceName = b'EPOS', ProtocolStackName = b'MAXON_RS232', InterfaceName = b'RS232'.
The rest of the module is the Sacher_EPOS class. The original commentary lists sixteen methods up front: __init__(self, name, address, reset=False), __del__(self), get_bit(self, byteval, idx), _u32todouble(self, uinput), open(self), close(self), get_offset(self), fine_tuning_steps(self, steps), set_new_offset(self, new_offset), get_motor_position(self), set_target_position(self, target, absolute, immediately), do_get_wavelength(self), do_set_wavelength(self, wavelength), is_open(self), clear_fault(self), initialize(self). A few more appear later in the class body: the static _doubletou32, find_home, restore, get_motor_current and set_coeffs.

__init__ stores the serial port name in self._port_name, sets self._is_open = False and self._HPM = True, leaves the qtlab wiring commented out (add_parameter('wavelength', ..., minval=1070.0, maxval=1180.0) and the add_function calls), and then calls self.open() followed by self.initialize(); the try/except that once wrapped them is also commented out. __del__ simply calls self.close().

open() declares the VCS_OpenDevice prototype (four byte-string arguments plus a pointer to a DWORD error buffer, returning a handle) and calls eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf). If the error buffer comes back >= 0 the driver sets self._is_open = True and keeps the returned key handle in self._keyhandle. close() calls VCS_CloseDevice(self._keyhandle, buf); on success it clears _is_open, otherwise it logs 'did not close Sacher EPOS motor correctly'. is_open() just returns the flag.
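A minimal sketch of that open/close pattern, assuming the DLL path from the module-level setup and the prototypes shown in the fragments (VCS_OpenDevice taking four byte strings plus an error DWORD and returning a key handle); the COM port default is only the example used later in the file.

    import ctypes
    import ctypes.wintypes
    from ctypes.wintypes import DWORD

    # Assumed DLL location, as in the original module-level setup.
    eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')

    DeviceName = b'EPOS'
    ProtocolStackName = b'MAXON_RS232'
    InterfaceName = b'RS232'

    def open_device(port_name=b'COM3'):
        """Open the EPOS device; return (keyhandle, error_code)."""
        eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p,
                                           ctypes.c_char_p, ctypes.c_char_p,
                                           ctypes.POINTER(DWORD)]
        eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
        err = ctypes.pointer(DWORD(0))
        keyhandle = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName,
                                           InterfaceName, port_name, err)
        return keyhandle, err.contents.value

    def close_device(keyhandle):
        """Close the EPOS device; return the error buffer value."""
        eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
        eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
        err = ctypes.pointer(DWORD(0))
        eposlib.VCS_CloseDevice(keyhandle, err)
        return err.contents.value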
Three static helpers deal with the bit twiddling. get_bit(byteval, idx) is the usual mask test, return ((byteval & (1 << idx)) != 0). _u32todouble(uinput) implements what the original comment calls "the really weird/non-standard U32 to floating point conversion in the sacher VIs": bit 31 is the sign of the mantissa, the bits selected by the mask 0b0111111111111111111111110000000 (shifted right by 8) hold the mantissa as an integer that gets scaled by 1/1,000,000, bit 7 is the sign of the exponent, and the low six bits (mask 0b111111) are the exponent magnitude, so the result is mantissa * 2.0 ** (exp_sign * (uinput & exp_mask)). _doubletou32(dinput) is the inverse, used when set_coeffs() writes calibration coefficients back to the controller; the fragments show it deriving a sign bit, normalising the value through powers of ten and two (b = ceil(log10(abs(dinput))), a = dinput / 10**b, d = log2(10) * b, c = a * 2**(d - ceil(d))) and packing the sign bit into bit 31 of the result, but the remaining packing details are not fully recoverable here.
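A self-contained sketch of the decoder, following the bit layout just described; the names mirror the originals, but this is a stand-alone copy rather than the class methods themselves.

    def get_bit(byteval, idx):
        # True if bit idx of byteval is set.
        return ((byteval & (1 << idx)) != 0)

    def u32_to_double(uinput):
        """Decode the Sacher/EPOS non-standard U32 float encoding."""
        # Bit 31: sign of the mantissa.
        mantissa_sign = -1 if get_bit(uinput, 31) else 1
        # Mantissa bits, scaled by 1e-6 (mask copied verbatim from the driver).
        mantissa_mask = 0b0111111111111111111111110000000
        mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
        # Bit 7: sign of the exponent; low six bits: exponent magnitude.
        exp_sign = -1 if get_bit(uinput, 7) else 1
        exp_mask = 0b111111
        return mantissa * 2.0 ** (float(exp_sign) * float(uinput & exp_mask))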
initialize() is the long one. It runs through the following sequence:
- Set the protocol stack to 38400 baud with a timeout of 100 (VCS_SetProtocolStackSettings).
- Clear any pending fault with VCS_ClearFault; if that fails it pulls the error text with VCS_GetErrorInfo and raises ValueError.
- Read the enable state (VCS_GetEnableState); if the motor is already enabled, log 'EPOS motor enabled, disabling before proceeding.' and disable it with VCS_SetDisableState, warning again if the motor 'was not successfully disabled'.
- Set the encoder parameters: 512 incremental encoder counts per turn, position sensor type 4 (VCS_SetEncoderParameter). The error check here is commented out, with a note that the LabVIEW reference code treats a zero return with a non-zero error code in the buffer as normal and never checks it, and that the 2005 version of the DLL does not even export VCS_GetErrorInfo.
- Read the operation mode (VCS_GetOperationMode) and, if it is not 1 ('profile position mode'), set it to 1 with VCS_SetOperationMode.
- Read the stored position offset from the device's "homposition" object via get_offset(), then read the stored 'calculation parameters': four 32-bit values from object 8204, subindices 1 through 4, each fetched with VCS_GetObject into a one-element c_uint32 array and kept as _coefA through _coefD.
- Decode A, B and C with _u32todouble into _doubleA, _doubleB, _doubleC. Coefficient D is not a float at all: its upper and lower 16-bit halves (np.int16 of _coefD >> 16 and of _coefD & 0xffff) are the wavelength range in Angstroms, 10871 and 11859 for this Sacher, and dividing each by 10 gives _minwl and _maxwl in nm.
- Finally evaluate the calibration polynomial at the stored offset, _currentwl = _doubleA * offset**2 + _doubleB * offset + _doubleC, print the current wavelength, and return True.
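A sketch of the object-dictionary read used for those calibration parameters, assuming an already-open key handle and a VCS_GetObject prototype matching the fragments; decode_u32 stands in for the decoder sketched above, and the function names are illustrative.

    import ctypes
    import ctypes.wintypes

    def read_u32_object(eposlib, keyhandle, index, subindex, node_id=0):
        """Read one 32-bit value from the EPOS object dictionary."""
        eposlib.VCS_GetObject.argtypes = [
            ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
            ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
            ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        data = (ctypes.c_uint32 * 1)()
        n_read = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ok = eposlib.VCS_GetObject(keyhandle, ctypes.wintypes.WORD(node_id),
                                   ctypes.wintypes.WORD(index), ctypes.c_uint8(subindex),
                                   ctypes.cast(data, ctypes.c_void_p),
                                   ctypes.wintypes.DWORD(4), n_read, ctypes.byref(buf))
        if ok == 0:
            raise IOError('VCS_GetObject failed, error buffer %d' % buf.value)
        return data[0]

    def read_calibration(eposlib, keyhandle, decode_u32):
        """Fetch coefficients A-D (object 8204, subindices 1-4) and unpack them."""
        raw = [read_u32_object(eposlib, keyhandle, 8204, sub) for sub in (1, 2, 3, 4)]
        A, B, C = (decode_u32(r) for r in raw[:3])
        # Coefficient D packs the wavelength range (Angstroms) into two 16-bit halves.
        min_wl = (raw[3] >> 16) & 0xffff
        max_wl = raw[3] & 0xffff
        return A, B, C, min_wl / 10.0, max_wl / 10.0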
"pMovementState.contents.value == 1: break nchecks = nchecks + 1 #", "- int(self._offset) print('wavelength_to_pos: {}'.format(wavelength_to_pos)) print('diff_wavelength_offset: {}'.format(diff_wavelength_offset)) print('self._offset: {}'.format(int(self._offset))) \"\"\" Not", "# units = 'nm', # minval=1070.0,maxval=1180.0) # self.add_function('open') # self.add_function('close')", "get what this is doing sudo git a_clue \"\"\" @staticmethod", "position') # print('Current motor position is %d' % (self.get_motor_position())) ret", "ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL pProfileVelocity = ctypes.wintypes.DWORD(429)", "so I wonder what initialize(self) is doing At any rate", "ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL", "state # print('#5 Motor current: {}'.format(self.get_motor_current())) # print('#5 Motor current:", "= 1 elif sign == True: mantissa_sign = -1 exp_mask", "ctypes.byref(buf)) # Now get the motor position (stored position offset)", "print 'close device returned %s' % buf if int(buf.contents.value) >=", "d = np.log2(10) * b d_ = np.ceil(d) c =", "# self.add_function('set_target_position') # try: self.open() self.initialize() # except: # logging.error('Error", "ctypes.wintypes.WORD, ctypes.c_long, ctypes.wintypes.BOOL, ctypes.wintypes.BOOL, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL # print('check", "ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))", "= eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf)) return motorCurrent.value \"\"\" Not sure", "_doubletou32(dinput): mantissa_bit = 0 if int(dinput / abs(dinput)) > 0", "First, set enabled state # print('#5 Motor current: {}'.format(self.get_motor_current())) #", "2400 \"\"\" wooooooo a bunch a variables and none of", "but actually: __init__(self, name, address, reset=False) __del__(self) get_bit(self, byteval,idx) _u32todouble(self,", "# print 'exp conv %s' % (exp_sign*int(uinput & exp_mask)) #", "( self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC", "buf.value) steps_per_second = 14494.0 # hardcoded, estimated roughly, unused now", "ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf)) # print('set motor position", "then you use them but honestly I don't really get", "20 DISCRMIN = 0 DISCRMAX = 800 # in ps", "know what \"opening\" and \"closing\" the motor means though and", "# # self.set_target_position(10000, False, True) else: # print('Step 6b... 
diff", "%s' % (self._doubleA, self._doubleB, self._doubleC)) # print('#3 Motor current: {}'.format(self.get_motor_current()))", "ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf)) return motorCurrent.value \"\"\" Not", "% bin(int(uinput & exp_mask)) # print 'exp conv %s' %", "#5') # print(nchecks) # print('') time.sleep(0.01) # Now set disabled", "means though and yeah also these random variables don't make", "motorCurrent = ctypes.c_uint8(0) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID,", "open(self) close(self) get_offset(self) fine_tuning_steps(self, steps) set_new_offset(self, new_offset) get_motor_position(self) set_target_position(self, target,", "= 'nm', # minval=1070.0,maxval=1180.0) # self.add_function('open') # self.add_function('close') # self.add_function('fine_tuning_steps')", "# self.add_function('open') # self.add_function('close') # self.add_function('fine_tuning_steps') # self.add_function('get_motor_position') # self.add_function('set_target_position')", "0 MODE_T2 = 2 MODE_T3 = 3 FLAG_OVERFLOW = 0x0040", "data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefD = CastedObjectData[0]", "sacher_epos.py, python wrapper for sacher epos motor # <NAME> <<EMAIL>>,", "# this function implements the really weird/non-standard U32 to #", "yet \"\"\" def find_home(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE,", "ZCMAX = 20 DISCRMIN = 0 DISCRMAX = 800 #", "if sign == False: mantissa_sign = 1 elif sign ==", "# print('Getting motor position') current_motor_pos = self.get_motor_position() # Step 2:", "virtual environment \"\"\" # from instrument import Instrument # import", "print 'setting new offset' d = (min_wl << 16) +", "return True \"\"\" Not sure what this is doing yet", "= eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf)) # print 'set protocol buf", "print 'arg' ## ## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64)) ##", "-1 # print 'exp extract %s' % bin(int(uinput & exp_mask))", "by 10,000 steps # print('Step 5...') # print('#4 Motor current:", "from instrument import Instrument from ctypes.wintypes import DWORD, WORD import", "# print('Step 4...') diff_wavelength_offset = wavelength_to_pos - int(self._offset) print('wavelength_to_pos: {}'.format(wavelength_to_pos))", "qt import ctypes import ctypes.wintypes import logging import time #", "about it later \"\"\" # from subprocess import Popen, PIPE", "state buf %s ret %s and en %s' % (buf,", "enable state buf %s ret %s and en %s' %", "motor position ret %s' % ret) # print('set motor position", "sacher VIs # get sign of number sign = Sacher_EPOS.get_bit(uinput,", "fine_tuning_steps(self, steps) set_new_offset(self, new_offset) get_motor_position(self) set_target_position(self, target, absolute, immediately) do_get_wavelength(self)", "between the output position and the stored offset # print('Step", "yet \"\"\" def restore(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE,", "ah whatever I'll have to worry about this later \"\"\"", "+ self._doubleB*self._offset + self._doubleC self._currentwl = self._doubleA * ( self.get_motor_position())", "time # from instrument import Instrument from ctypes.wintypes import DWORD,", "def get_motor_current(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, 
ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_uint8),", "now nchecks = 0 # print('check #3') while nchecks <", "\"\"\" \"\"\" Also we're done with the Sacher_EPOS() class at", "print('closing EPOS motor.') eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)] eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL", "# self._port_name = str(address) self._port_name = address self._is_open = False", "if pMovementState.contents.value == 1: break nchecks = nchecks + 1", "going on here \"\"\" def __del__(self): # execute disconnect self.close()", "= np.ceil(d) c = a * 2 ** (d -", "{}'.format(self.get_motor_current())) # print('#7 Motor current: {}'.format(self.get_motor_current())) # print('set motor position", "the object data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefB", "to tighten decreasing wavelength: there's an overshoot when lowering wavelength", "current: {}'.format(self.get_motor_current())) pTarget = ctypes.c_long(target) pAbsolute = ctypes.wintypes.BOOL(absolute) pImmediately =", "the + square root solution x = b2a - np.sqrt(sqrtarg)", "ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) if int(ret) != 0: logging.warning(__name__", "ctypes.wintypes.BOOL # print 'setting new offset' StoredPositionObject = ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex", "ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_GetObject(self._keyhandle,", "= ctypes.pointer(ctypes.wintypes.BOOL()) # print(pMovementState.contents.value) eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)]", "enabled, disabling before proceeding.') ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) if", "if sqrtarg < 0.0: logging.error(__name__ + ' Negative value under", "doing At any rate there doesn't seem to be a", "(min_wl << 16) + max_wl StoredPositionObject = ctypes.wintypes.WORD(8204) for subidx,", "{}'.format(self.get_motor_current())) nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) # Step 1:", "lab32 virtual environment \"\"\" # from instrument import Instrument #", "make any sense to me \"\"\" def get_motor_current(self): nodeID =", "Set the minimum and maximum wavelengths for the motor self._minwl", "%s ret %s' % (buf, ret) if ret == 0:", "were unsigned 32 bit something something? 
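The subindex-4 packing and the offset bookkeeping are plain integer arithmetic, so they can be checked without the hardware. A small sketch; the helper names are illustrative and the range values are the ones from the example set_coeffs call quoted later in the file.

    def pack_wavelength_range(min_wl_angstrom, max_wl_angstrom):
        # Subindex 4 of object 8204: high 16 bits = min, low 16 bits = max.
        return (min_wl_angstrom << 16) + max_wl_angstrom

    def unpack_wavelength_range(coef_d):
        # Inverse of the packing above; initialize() divides by 10 to get nm.
        return (coef_d >> 16) & 0xffff, coef_d & 0xffff

    def updated_offset(old_offset, pos_before, pos_after):
        # fine_tuning_steps(): keep the calibration consistent after a manual move.
        return pos_after - pos_before + old_offset

    if __name__ == '__main__':
        d = pack_wavelength_range(10840, 11860)   # example values from set_coeffs(...)
        assert unpack_wavelength_range(d) == (10840, 11860)
        print(unpack_wavelength_range(d))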
Motion and status go through a handful of thin wrappers plus one long method:
- get_motor_position() calls VCS_GetPositionIs and returns the position from a c_long buffer; get_motor_current() calls VCS_GetCurrentIs and returns the reading from the c_uint8 buffer the original declares.
- clear_fault() calls VCS_ClearFault and raises ValueError on failure. find_home() calls VCS_FindHome with ctypes.c_uint8(35) as its argument (presumably the homing method) and prints the return code; restore() calls VCS_Restore, reusing the VCS_FindHome argtypes declaration, evidently a copy-paste, and likewise prints and returns the return code.
- set_target_position(target, absolute, immediately) is the workhorse. It checks the operation mode and forces it to 1 (profile position mode) if necessary, reads the position profile and, if the velocity is above 11400 or the acceleration or deceleration above 60000, logs 'GetPositionProfile out of bounds, resetting...' and rewrites the profile as 429/429/429 with VCS_SetPositionProfile. It then enables the drive (VCS_SetEnableState), issues VCS_MoveToPosition with the target, the absolute flag and the immediately flag, and polls VCS_GetMovementState for up to 1000 iterations with a 10 ms sleep between checks; a movement state of 1 means the motor is done moving. (A hardcoded steps_per_second = 14494.0 estimate is left in the source but unused.) Finally it disables the drive again with VCS_SetDisableState and returns the last return code.
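A condensed sketch of that move-and-wait pattern, assuming an open key handle and prototypes that follow the fragments above; the profile-limit reset and the debug printing are left out, and the function name is illustrative.

    import time
    import ctypes
    import ctypes.wintypes

    def move_and_wait(eposlib, keyhandle, target, absolute=True, immediately=True,
                      node_id=0, max_checks=1000, poll_s=0.01):
        """Issue VCS_MoveToPosition and poll VCS_GetMovementState until done."""
        nodeID = ctypes.wintypes.WORD(node_id)
        buf = ctypes.wintypes.DWORD(0)
        state_args = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                      ctypes.POINTER(ctypes.wintypes.DWORD)]

        # Enable the drive before moving.
        eposlib.VCS_SetEnableState.argtypes = state_args
        eposlib.VCS_SetEnableState.restype = ctypes.wintypes.BOOL
        eposlib.VCS_SetEnableState(keyhandle, nodeID, ctypes.byref(buf))

        eposlib.VCS_MoveToPosition.argtypes = [
            ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
            ctypes.wintypes.BOOL, ctypes.wintypes.BOOL, ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_MoveToPosition(keyhandle, nodeID, ctypes.c_long(target),
                                         ctypes.wintypes.BOOL(absolute),
                                         ctypes.wintypes.BOOL(immediately),
                                         ctypes.byref(buf))

        # A movement state of 1 indicates the motor is done moving.
        eposlib.VCS_GetMovementState.argtypes = [
            ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
            ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
        state = ctypes.pointer(ctypes.wintypes.BOOL())
        for _ in range(max_checks):
            eposlib.VCS_GetMovementState(keyhandle, nodeID, state, ctypes.byref(buf))
            if state.contents.value == 1:
                break
            time.sleep(poll_s)

        # Disable the drive again, as the original set_target_position does.
        eposlib.VCS_SetDisableState.argtypes = state_args
        eposlib.VCS_SetDisableState.restype = ctypes.wintypes.BOOL
        eposlib.VCS_SetDisableState(keyhandle, nodeID, ctypes.byref(buf))
        return ret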
Wavelength get/set sits on top of the calibration polynomial wavelength(p) = _doubleA * p**2 + _doubleB * p + _doubleC:
do_get_wavelength() refreshes the stored offset and then evaluates the polynomial at the current motor position (an older variant that evaluated it at the offset instead is left commented out); it prints and returns the result in nm.
do_set_wavelength(wavelength) follows the numbered steps in the source:
1. Read the actual motor position.
2. Read the stored offset.
3. Convert the target wavelength to a motor position by inverting the polynomial. The code first evaluates the polynomial at positions 0 and 5000 as a direction check; a wavelength that decreases with position is logged as 'Sacher wavelength calibration polynomials indicated a wrong wavelength direction'. If that's OK, the quadratic formula is applied: b2a = -B / (2*A) and sqrtarg = B**2 / (4*A**2) - (C - wavelength) / A, with a negative sqrtarg reported as 'Negative value under square root sign -- something is wrong'. The root selection still covers both orderings: x = b2a - sqrt(sqrtarg) when pos0 > pos5000 and x = b2a + sqrt(sqrtarg) otherwise, rounded to an integer position.
4. Compute diff_wavelength_offset = wavelength_to_pos - int(offset).
5. With HPM active and a downward move, the original plan was to overshoot by 10,000 steps and come back; that block is labelled in the source as the thing that is not working and is commented out.
6. Issue the move. The relative-move variant is marked WRONG in the source; the call that actually runs is set_target_position(wavelength_to_pos, True, True), i.e. an absolute move executed immediately.
7. Re-read the motor position.
8. Store new_motor_pos - current_motor_pos + offset as the new offset, then read back and print the resulting wavelength.
ah whatever I'll have to", "np.ceil(np.log10(abs(dinput))) a = dinput / 10 ** b if dinput", "1) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff)) ObjectData", "this is doing yet \"\"\" def do_set_wavelength(self, wavelength): print('setting wavelength...')", "base to move to the right when square is stuck", "ret == int(0): ## print 'errr' ## errbuf = ctypes.create_string_buffer(64)", "# p = Popen([python32_dir + \"\\\\python.exe\", derp + \"\\\\delegate.py\"], stdout=PIPE,", "type(self._port_name), type(buf)) ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf) self._keyhandle", "seem to be a lot going on here \"\"\" def", "and use libc # libc = m.SacherLasaTeknique() # print(libc.vcs()) #", "ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff)) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten", "Not sure what this is doing yet \"\"\" def set_coeffs(self,", "B StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(2) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)", "# atexit.register(p.terminate) # port = int(p.stdout.readline()) # authkey = p.stdout.read()", "% (self.get_motor_position())) # print('check #7') return ret \"\"\" Not sure", "the buffer has a non-zero error code in it; the", "return value is zero # and the buffer has a", "ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf)) print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value,", "state of 1 indicates the motor # is done moving", "is the real shit right here I need to set", "(ret, buf.value)) # print('Final motor position is %d' % (self.get_motor_position()))", "{}'.format(diff_wavelength_offset)) print('self._offset: {}'.format(int(self._offset))) \"\"\" Not sure what this is doing", "offset) # from the device's \"homposition\" object self._offset = self.get_offset()", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))", "4...') diff_wavelength_offset = wavelength_to_pos - int(self._offset) print('wavelength_to_pos: {}'.format(wavelength_to_pos)) print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))", "\"C:\\\\Users\\\\Alex\\\\Documents\\\\wow_such_code\" # assert os.path.isdir(derp) # os.chdir(derp) # p = Popen([python32_dir", "THAT'S NOT WORKING! # \"\"\" # # #print('Step 6a... 
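# Before the class: a quick note on the calling convention. Every wrapper in
# this file follows the same ctypes pattern -- declare .argtypes/.restype on
# the DLL entry point, allocate a DWORD error buffer, pass everything by
# reference, and treat a zero return value as failure. The helper below is a
# minimal sketch of that pattern; it is not part of the original driver, and
# the choice of VCS_ClearFault is just for illustration.
def _vcs_call_demo(keyhandle, node_id=0):
    eposlib.VCS_ClearFault.argtypes = [ctypes.wintypes.HANDLE,
                                       ctypes.wintypes.WORD,
                                       ctypes.POINTER(ctypes.wintypes.DWORD)]
    eposlib.VCS_ClearFault.restype = ctypes.wintypes.BOOL
    buf = ctypes.wintypes.DWORD(0)  # receives the error code on failure
    ret = eposlib.VCS_ClearFault(keyhandle, ctypes.wintypes.WORD(node_id), ctypes.byref(buf))
    # ret == 0 means the call failed; buf.value then holds the error code
    return ret, buf.value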
class Sacher_EPOS():
    """
    OK, before I dive into this giant Sacher class, let me just list the
    methods it defines:

    get_bit(byteval, idx)
    _u32todouble(uinput)
    _doubletou32(dinput)
    open(self)
    close(self)
    get_offset(self)
    fine_tuning_steps(self, steps)
    set_new_offset(self, new_offset)
    get_motor_position(self)
    set_target_position(self, target, absolute, immediately)
    do_get_wavelength(self)
    do_set_wavelength(self, wavelength)
    is_open(self)
    clear_fault(self)
    initialize(self)

    The last one is really long, and there are quite a few of them, so I'll
    comment on them as I go through.
    """

    def __init__(self, name, address, reset=False):
        # Instrument.__init__(self, name, tags=['physical'])
        # self._port_name = str(address)
        self._port_name = address
        self._is_open = False
        self._HPM = True

        # self.add_parameter('wavelength',
        #                    flags=Instrument.FLAG_GETSET,
        #                    type=types.FloatType,
        #                    units='nm',
        #                    minval=1070.0, maxval=1180.0)
        # self.add_function('open')
        # self.add_function('close')
        # self.add_function('fine_tuning_steps')
        # self.add_function('get_motor_position')
        # self.add_function('set_target_position')
        try:
            self.open()
            self.initialize()
        except:
            logging.error(__name__ + ' error loading Sacher EPOS motor. In use?')
    """
    To me this really looks like the initialize function, so I wonder what
    initialize(self) is doing. At any rate, there doesn't seem to be a lot
    going on here.
    """

    def __del__(self):
        # execute disconnect
        self.close()
        return
    """
    This might be the only self-explanatory one: it disconnects.
    """

    @staticmethod
    def get_bit(byteval, idx):
        # def get_bit(self, byteval, idx):
        return ((byteval & (1 << idx)) != 0)
    """
    You get the bits, and then you use them.
    """

    @staticmethod
    def _u32todouble(uinput):
        # This function implements the really weird/non-standard U32 to
        # floating point conversion in the Sacher VIs.
        # Get the sign of the number (bit 31).
        sign = Sacher_EPOS.get_bit(uinput, 31)
        if sign == False:
            mantissa_sign = 1
        elif sign == True:
            mantissa_sign = -1
        exp_mask = 0b111111
        # The mantissa lives in bits 8..30, scaled by 1e6.
        mantissa_mask = 0b01111111111111111111111100000000
        # mantissa_mask = 0b0111111111111111111111110000000
        mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
        # Bit 7 is the sign of the exponent; the low bits are its magnitude.
        if Sacher_EPOS.get_bit(uinput, 7) == False:
            exp_sign = 1
        elif Sacher_EPOS.get_bit(uinput, 7) == True:
            exp_sign = -1
        output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
        return output
    """
    dc gave some slight explanations here: apparently this is a "really
    weird/non-standard U32 to floating point conversion in the Sacher VIs".
    It'd be great to know more, but that's all the documentation there is.
    """

    @staticmethod
    def _doubletou32(dinput):
        # Inverse of _u32todouble: pack a float back into the Sacher U32 format.
        mantissa_bit = 0 if np.sign(dinput) * (abs(dinput)) > 0 else 1
        exp_bit = 1 if -1 < dinput < 1 else 0
        b = np.ceil(np.log10(abs(dinput)))
        a = dinput / 10 ** b
        if dinput < 0:
            a = -a
        # print('a:\t{}\tb:\t{}'.format(a, b))
        d = np.log2(10) * b
        d_ = np.ceil(d)
        c = a * 2 ** (d - d_)
        # print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
        return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
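    # Round-trip sanity check for the two converters above. This demo is an
    # addition for illustration (the value 1.234 is arbitrary); the bit layout
    # it exercises -- sign in bit 31, mantissa*1e6 in bits 8..30, exponent
    # sign in bit 7, exponent magnitude in the low bits -- is the one
    # implemented by _u32todouble/_doubletou32.
    @staticmethod
    def _u32_roundtrip_demo(value=1.234):
        packed = Sacher_EPOS._doubletou32(value)
        unpacked = Sacher_EPOS._u32todouble(packed)
        # Only approximate: the mantissa is truncated to six decimal digits.
        print('%r -> 0x%08x -> %r' % (value, packed, unpacked))
        return unpacked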
    def open(self):
        eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
                                           ctypes.c_char_p, ctypes.POINTER(DWORD)]
        eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
        buf = ctypes.pointer(DWORD(0))
        ret = ctypes.wintypes.HANDLE()
        # print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf))
        ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
        self._keyhandle = ret
        # print 'keyhandle is %s' % self._keyhandle
        # print 'open device ret %s' % buf
        if int(buf.contents.value) >= 0:
            self._is_open = True
            self._keyhandle = ret
        return

    def close(self):
        eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
        eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
        buf = ctypes.pointer(DWORD(0))
        ret = ctypes.wintypes.BOOL()
        ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
        # print 'close device returned %s' % buf
        if int(buf.contents.value) >= 0:
            self._is_open = False
        else:
            logging.error(__name__ + ' Could not close Sacher EPOS motor correctly.')
        return
    """
    Apparently this closes the EPOS motor. I don't know what "opening" and
    "closing" the motor means, but there doesn't seem to be a lot going on here.
    """

    def is_open(self):
        return self._is_open

    def get_offset(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        # Hardcoded values -- I don't think any documentation exists on
        # particular object indices.
        StoredPositionObject = ctypes.wintypes.WORD(8321)
        StoredPositionObjectSubindex = ctypes.c_uint8(0)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to int32 (the offset can be negative).
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
        if ret == 0:
            logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
        return CastedObjectData[0]

    def set_new_offset(self, new_offset):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
        # print 'setting new offset'
        StoredPositionObject = ctypes.wintypes.WORD(8321)
        StoredPositionObjectSubindex = ctypes.c_uint8(0)
        StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                    ctypes.byref(buf))
        if ret == 0:
            logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
        return

    def get_motor_position(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        pPosition = ctypes.pointer(ctypes.c_long())
        eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                              ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
        # print 'get motor position ret %s' % ret
        # print 'get motor position value %s' % pPosition.contents.value
        return pPosition.contents.value

    def get_motor_current(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                             ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
        motorCurrent = ctypes.c_uint8(0)
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
        return motorCurrent.value

    def set_target_position(self, target, absolute, immediately):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        # Get the operation mode, check if it's 1 -- this is "profile position mode".
        pMode = ctypes.pointer(ctypes.c_int8())
        eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                 ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
        # If the mode is not 1, make it 1.
        eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                 ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
        pMode_setting = ctypes.c_int8(1)
        ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))

        eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                   ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
        pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
        pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
        pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
        ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
                                             pProfileDeceleration, ctypes.byref(buf))
        print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value)
        if (int(pProfileVelocity.contents.value) > int(11400)
                or int(pProfileAcceleration.contents.value) > int(60000)
                or int(pProfileDeceleration.contents.value) > int(60000)):
            # The profile looks bogus; write back a sane one. (The original set
            # the argtypes on VCS_GetPositionProfile here, but the call that
            # follows is VCS_SetPositionProfile.)
            eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                       ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
                                                       ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
            pProfileVelocity = ctypes.wintypes.DWORD(429)
            pProfileAcceleration = ctypes.wintypes.DWORD(429)
            pProfileDeceleration = ctypes.wintypes.DWORD(429)
            logging.warning(__name__ + ' GetPositionProfile returned unreasonable values, resetting the profile')
            ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
                                                 pProfileDeceleration, ctypes.byref(buf))

        # steps_per_second = 14494.0  # hardcoded, estimated roughly, unused now
        ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
        # print('Enable state ret %s buf %s' % (ret, buf.value))
        pTarget = ctypes.c_long(target)
        pAbsolute = ctypes.wintypes.BOOL(absolute)
        pImmediately = ctypes.wintypes.BOOL(immediately)
        eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
                                               ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
                                               ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))

        self._offset = self.get_offset()
        # Poll the movement state; a movement state of 1 indicates the motor
        # is done moving.
        pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
        eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                 ctypes.POINTER(ctypes.wintypes.BOOL),
                                                 ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
        nchecks = 0
        while nchecks < 1000:
            ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
            if pMovementState.contents.value == 1:
                break
            nchecks = nchecks + 1
            time.sleep(0.01)

        # Now set the disabled state.
        ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
        # print('Final motor position is %d' % (self.get_motor_position()))
        return ret

    def find_home(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                         ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
        print('Homing: {}'.format(ret))
        return ret

    def restore(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_Restore.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                        ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_Restore.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
        print('Restore: {}'.format(ret))
        return ret

    def clear_fault(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
        print('clear fault buf %s, ret %s' % (buf, ret))
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)
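    # The polling idiom used by set_target_position above, pulled out on its
    # own for clarity. This method is an illustrative addition, not something
    # the original file defines; the VCS_GetMovementState signature and the
    # "movement state of 1 means done" convention come from the driver itself.
    def _wait_for_motion_done_demo(self, max_checks=1000, poll_s=0.01):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
        eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                 ctypes.POINTER(ctypes.wintypes.BOOL),
                                                 ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
        for _ in range(max_checks):
            eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
            if pMovementState.contents.value == 1:
                return True
            time.sleep(poll_s)
        return False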
# \"\"\" # # #print('Step 6a...", "8) + (int(exp_bit) << 7) + int(abs(d_)) def open(self): eposlib.VCS_OpenDevice.argtypes", "# self.add_parameter('wavelength', # flags = Instrument.FLAG_GETSET, # type = types.FloatType,", "close(self) is apparently closing the EPOS motor, maybe this is", "I'll comment about them as I go through them \"\"\"", "eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf =", "{}'.format(self.get_motor_current())) # print('#6 Motor current: {}'.format(self.get_motor_current())) # print('#6 Motor current:", "print('check #7') return ret \"\"\" Not sure what this is", "buf %s ret %s' % (buf, ret) if ret ==", "motor position (stored position offset) # from the device's \"homposition\"", "self.get_offset() # Now read the stored 'calculation parameters' eposlib.VCS_GetObject.argtypes =", "square is stuck in causes screw to loosen causes large", "= ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype =", "= ctypes.c_uint8(0) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent),", "MODE_T2 = 2 MODE_T3 = 3 FLAG_OVERFLOW = 0x0040 FLAG_FIFOFULL", "read the stored 'calculation parameters' eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,", "offset is %s' % (new_motor_pos-current_motor_pos+self._offset) self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)", "= ctypes.wintypes.DWORD(0) pPosition = ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long),", "= (ctypes.c_uint32 * 1)() ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesRead =", "# print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000)) mantissa_mask =", "# \"\"\" Possbily Maxon EPOS now \"\"\" \"\"\" This is", "'binary input is %s' % bin(long(uinput)) # get sign of", "get_bit(self, byteval,idx): return ((byteval & (1 << idx)) != 0)", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # These are hardcoded values I", "Motor current: {}'.format(self.get_motor_current())) nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) #", "= Popen([python32_dir + \"\\\\python.exe\", derp + \"\\\\delegate.py\"], stdout=PIPE, cwd=derp) #", "% (ret, buf.value)) # print('Final motor position is %d' %", "b2a - np.sqrt(sqrtarg) elif pos0 < pos5000: x = b2a", "sure what this is doing yet \"\"\" def fuck_my_life(self, wavelength):", "return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8)", "print('setting wavelength done') return \"\"\" Not sure what this is", "if ret == 0: logging.error(__name__ + ' Could not write", "get_motor_position(self) set_target_position(self, target, absolute, immediately) do_get_wavelength(self) do_set_wavelength(self, wavelength) is_open(self) clear_fault(self)", "# from multiprocessing.managers import BaseManager # import atexit # import", "eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) \"\"\" Not sure what this", "eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToRead, 
StoredPositionNbBytesRead, ctypes.byref(buf)) # Cast", "disabled, proceeding') else: logging.error(__name__ + ' EPOS motor was not", "to be a lot going on here \"\"\" def __del__(self):", "ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf)) # print('#7", "# print buf.contents.value # print 'done printer' if int(buf.contents.value) >=", "self._coefC = CastedObjectData[0] # Get coefficient D eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE,", "position and the stored offset # print('Step 4...') diff_wavelength_offset =", "print(sqrtarg) # print(pos0) # print(pos5000) if sqrtarg < 0.0: logging.error(__name__", "DWORD(38400) Timeout = DWORD(100) ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))", "pProfileVelocity = ctypes.wintypes.DWORD(429) pProfileAcceleration = ctypes.wintypes.DWORD(429) pProfileDeceleration = ctypes.wintypes.DWORD(429) logging.warning(__name__", "causes large gold base to tighten decreasing wavelength: there's an", "current wavelength position:' self._currentwl = self._doubleA * (self._offset) ** 2.0", "to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA = CastedObjectData[0] eposlib.VCS_GetObject.argtypes", "%s' % pPosition.contents.value return pPosition.contents.value # print('getting motor position...') #", "get_motor_position(self): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) pPosition = ctypes.pointer(ctypes.c_long())", "- 10000, False, True) # # Step 6: Set the", "str(address) self._port_name = address self._is_open = False self._HPM = True", "me \"\"\" def get_motor_current(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE,", "an overshoot when lowering wavelength causes the square to rotate", "self._offset = self.get_offset() self.set_target_position(steps, False, True) new_motor_pos = self.get_motor_position() #", "self.set_target_position(wavelength_to_pos, True, True) \"\"\"this is the real shit right here", "but I'll worry about it later \"\"\" # from subprocess", "# the movement by 10,000 steps # print('Step 5...') #", "False, True) new_motor_pos = self.get_motor_position() # print('New motor position is", "ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject,", "causes screw to tighten causes large gold base to loosen,", "PIPE # from multiprocessing.managers import BaseManager # import atexit #", "logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong", "print('setting wavelength...') print('') # print('Coefficients are %s %s %s' %", "%s' % (buf, ret)) if ret == 0: errbuf =", "'new offset is %s' % (new_motor_pos-current_motor_pos+self._offset) self.set_new_offset(new_motor_pos - current_motor_pos +", "the lab32 virtual environment \"\"\" # from instrument import Instrument", "= ctypes.c_uint8(0) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray =", "ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf)) ## if ret", "# minval=1070.0,maxval=1180.0) # self.add_function('open') # self.add_function('close') # self.add_function('fine_tuning_steps') # self.add_function('get_motor_position')", "print('#5 Motor current: 
{}'.format(self.get_motor_current())) # print('#5 Motor current: {}'.format(self.get_motor_current())) #", "# print('check #6') # print('Disable state ret %s buf %s'", "self.get_offset() # print('Motor offset is %s' % self._offset) pMovementState =", "uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefC = CastedObjectData[0] # Get", "square root sign -- something is wrong') if pos0 >", "exp_mask)) # print 'exp conv %s' % (exp_sign*int(uinput & exp_mask))", "% self.get_bit(uinput,7) # print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000))", "sign == False: mantissa_sign = 1 elif sign == True:", "-1 exp_mask = 0b111111 # print 'uin u is %d'", "diff_wavelength_offset) # Step 5: If HPM is activated and the", "to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefB = CastedObjectData[0] eposlib.VCS_GetObject.argtypes", "-- this is \"profile position mode\" buf = ctypes.wintypes.DWORD(0) pMode", "import ctypes import ctypes.wintypes import logging import time # from", "proceeding') else: logging.error(__name__ + ' EPOS motor was not successfully", "ctypes.wintypes.DWORD(0) ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf)) print('Restore: {}'.format(ret)) return ret", "print('Enable state ret %s buf %s' % (ret, buf.value)) #", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_Restore(self._keyhandle,", "+ self._doubleC print('Current wavelength: %.3f nm' % self._currentwl) return self._currentwl", "errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) if int(plsenabled.value)", "# print 'setting new offset' StoredPositionObject = ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex =", "/ 10.0 # print 'first %s second %s' % (firstHalf,", "byteval,idx) _u32todouble(self, uinput) open(self) close(self) get_offset(self) fine_tuning_steps(self, steps) set_new_offset(self, new_offset)", "ctypes.c_char_p, ctypes.wintypes.WORD] ## print 'arg' ## ## ret = eposlib.VCS_GetErrorInfo(buf,", "left causes base to move to the left when square", "position should be # print('Position is %s' % x) wavelength_to_pos", "# print 'mantissa extract is %s' % bin((uinput & mantissa_mask)", "= ctypes.pointer(ctypes.wintypes.DWORD(0)) ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToRead,", "return motorCurrent.value \"\"\" Not sure what this is doing yet", "pProfileAcceleration.contents.value, pProfileDeceleration.contents.value) if (int(pProfileVelocity.contents.value) > int(11400) or int(pProfileAcceleration.contents.value) > int(", "[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL pMode_setting = ctypes.c_int8(1)", "fine_tuning_steps(self, steps): current_motor_pos = self.get_motor_position() self._offset = self.get_offset() self.set_target_position(steps, False,", "coeff in enumerate([a, b, c]): print(subidx, coeff) StoredPositionObjectSubindex = ctypes.c_uint8(subidx", "ctypes.byref(buf)) print('clear fault buf %s, ret %s' % (buf, ret))", "int(ret) != 0: logging.warning(__name__ + ' EPOS motor successfully disabled,", "MODE_HIST = 0 MODE_T2 = 2 MODE_T3 = 3 FLAG_OVERFLOW", "don't really get what this is doing sudo git a_clue", "on Max off 
but anyway it looks like ctypes is", "eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf)) print('Homing: {}'.format(ret)) return ret \"\"\" Not", "31) if sign == False: mantissa_sign = 1 elif sign", "StoredPositionObject = ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray", "# If that's OK, use the quadratic formula to calculate", "%s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf)) ret =", "7: Get the actual motor position new_motor_pos = self.get_motor_position() #", "this might be the only self explanatory one it disconnects", "ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD()) ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,", "%s' % new_motor_pos) # print 'new offset is %s' %", "# port = int(p.stdout.readline()) # authkey = p.stdout.read() # print(port,", "return pPosition.contents.value # print('getting motor position...') # print(ret) # return", "and en %s' % (buf, ret, plsenabled) if ret ==", "0.0 + self._doubleC pos5000 = self._doubleA * (5000.0) ** 2.0", "import BaseManager # import atexit # import os # python32_dir", "StoredPositionObject = ctypes.wintypes.WORD(8204) for subidx, coeff in enumerate([a, b, c]):", "int(plsenabled.value) != 0: logging.warning(__name__ + ' EPOS motor enabled, disabling", "= ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32 * 1)() ObjectData = ctypes.cast(ObjectDataArray,", "be # print('Position is %s' % x) wavelength_to_pos = int(round(x))", "self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC print('Current", "int(pProfileDeceleration.contents.value) > int(60000)): eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,", "if ret == 0: errbuf = ctypes.create_string_buffer(64) # eposlib.VCS_GetErrorInfo(buf, errbuf,", "explained way to go dc you da real champ \"\"\"", "# current_pos = epos.get_motor_position() # print('current position is {}'.format(current_pos)) #", "if ret == 0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))", "# epos.do_get_wavelength() # print('#1 Motor current: {}'.format(epos.get_motor_current())) # epos.do_get_wavelength() #", "True) # # Step 6: Set the real target position", "print('Motor offset is %s' % self._offset) # Step 3: Convert", "%s' % self._offset) pMovementState = ctypes.pointer(ctypes.wintypes.BOOL()) # print(pMovementState.contents.value) eposlib.VCS_GetMovementState.argtypes =", "{}'.format(self.get_motor_current())) # print('#7 Motor current: {}'.format(self.get_motor_current())) # print('#7 Motor current:", "data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefC = CastedObjectData[0]", "clear_fault(self) initialize(self) The last one is really long And also", "%s and en %s' % (buf, ret, plsenabled) if ret", "PHR800LVMAX = 2400 \"\"\" wooooooo a bunch a variables and", "= 14494.0 # hardcoded, estimated roughly, unused now nchecks =", "giant Sacher class thing let me just list here all", "sign of number sign = Sacher_EPOS.get_bit(uinput, 31) if sign ==", "motor enabled, disabling before proceeding.') ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))", "# print(pos0) 
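    # How the packed coefficient D is split into the wavelength range, shown
    # on the values the driver itself reports for the Sacher (10871 and 11859,
    # i.e. 1087.1 nm and 1185.9 nm). This helper is an illustrative addition;
    # the packing scheme (high 16 bits = min, low 16 bits = max, tenths of a
    # nanometre) is the one used by initialize() and set_coeffs().
    @staticmethod
    def _coefD_unpack_demo(coefD=(10871 << 16) + 11859):
        firstHalf = np.int16(coefD >> 16)
        secondHalf = np.int16(coefD & 0xffff)
        min_wl = float(firstHalf) / 10.0
        max_wl = float(secondHalf) / 10.0
        # For the default value this returns (1087.1, 1185.9).
        return min_wl, max_wl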
    def do_get_wavelength(self):
        self._currentwl = self._doubleA * (self.get_motor_position()) ** 2.0 \
                          + self._doubleB * self.get_motor_position() + self._doubleC
        print('Current wavelength: %.3f nm' % self._currentwl)
        return self._currentwl
    """
    Not sure what this is doing yet beyond evaluating the calibration polynomial.
    """

    def do_set_wavelength(self, wavelength):
        print('setting wavelength...')
        print('')
        # print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)

        # Step 1: Get the actual motor position.
        current_motor_pos = self.get_motor_position()

        # Step 2: Get the motor offset.
        self._offset = self.get_offset()
        # print('Motor offset is %s' % self._offset)

        # Step 3: Convert the wavelength to a motor position.
        # Check the sign of the position-to-wavelength polynomial first.
        pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
        pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
        # The original also sanity-checks the direction of the calibration here
        # and logs ' Sacher wavelength calibration polynomials indicated a
        # wrong wavelength direction' if it looks wrong.
        # If that's OK, use the quadratic formula to calculate the roots.
        b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
        sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
        if sqrtarg < 0.0:
            logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
        if pos0 > pos5000:
            # Take the - square root solution.
            x = b2a - np.sqrt(sqrtarg)
        elif pos0 < pos5000:
            x = b2a + np.sqrt(sqrtarg)
        # x is what the motor position should be.
        wavelength_to_pos = int(round(x))

        # Step 4: Calculate the difference between the output position and the
        # stored offset.
        diff_wavelength_offset = wavelength_to_pos - int(self._offset)
        print('wavelength_to_pos: {}'.format(wavelength_to_pos))
        print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
        print('self._offset: {}'.format(int(self._offset)))

        # Step 5: If HPM is activated and the new position is lower, overshoot
        # the movement by 10,000 steps. This block was commented out in the
        # original ("HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID
        # THING THAT'S NOT WORKING!").
        # if self._HPM and diff_wavelength_offset < 0:
        #     print('Overshooting by 10000')
        #     self.set_target_position(diff_wavelength_offset - 10000, False, True)

        # Step 6: Set the real target position.
        self.set_target_position(wavelength_to_pos, True, True)
        """this is the real thing right here: the absolute flag has to be True"""
        # self.set_target_position(10000, False, True)

        # Step 7: Get the actual motor position and update the stored offset.
        new_motor_pos = self.get_motor_position()
        # print('New motor position is %s' % new_motor_pos)
        # print('new offset is %s' % (new_motor_pos - current_motor_pos + self._offset))
        self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)

        # Step 8: get and print the current wavelength.
        print('setting wavelength done')
        return
    """
    Not sure what all of this is doing yet.
    """

    def fine_tuning_steps(self, steps):
        current_motor_pos = self.get_motor_position()
        self._offset = self.get_offset()
        self.set_target_position(steps, False, True)
        new_motor_pos = self.get_motor_position()
        # print('New motor position is %s' % new_motor_pos)
        # print('new offset is %s' % (new_motor_pos - current_motor_pos + self._offset))
        self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)

    def fuck_my_life(self, wavelength):
        print('goddamn this piece of shit')
        print('')
        print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        # The rest of this repeats the wavelength-to-position math from
        # do_set_wavelength for debugging.
        return
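    # The wavelength -> motor-position conversion used by do_set_wavelength,
    # reduced to the bare quadratic-formula step. This method is an
    # illustrative addition; the coefficients it uses are the ones read by
    # initialize(), and only the '-' root branch is shown.
    def _wavelength_to_position_demo(self, wavelength):
        b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
        sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
        if sqrtarg < 0.0:
            raise ValueError('wavelength outside the calibrated range')
        # do_set_wavelength picks the root according to the direction of the
        # calibration polynomial; the '-' root is shown here.
        return int(round(b2a - np.sqrt(sqrtarg)))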
bin(int(0b10000000000000000000000000000000)) mantissa_mask = 0b01111111111111111111111100000000 #", "7) == False: exp_sign = 1 elif Sacher_EPOS.get_bit(uinput, 7) ==", "self._keyhandle # print 'open device ret %s' % buf #", "eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL pMode_setting", "ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL # print 'setting", "current: {}'.format(self.get_motor_current())) # print('#5 Motor current: {}'.format(self.get_motor_current())) ret = eposlib.VCS_SetEnableState(self._keyhandle,", "ctypes.wintypes.BOOL # print 'setting new offset' d = (min_wl <<", "# self.set_target_position(diff_wavelength_offset - 10000, False, True) # # Step 6:", "# Set the minimum and maximum wavelengths for the motor", "\"\"\" def fine_tuning_steps(self, steps): current_motor_pos = self.get_motor_position() self._offset = self.get_offset()", "initialize function so I wonder what initialize(self) is doing At", "the motor Also you don't need to explicitly run epos.initialize()", "target position # # \"\"\" # HEY LOOK EVERYONE RIGHT", "ctypes.byref(plsenabled), ctypes.byref(buf)) # print 'get enable state buf %s ret", "this is doing Considering that close(self) is apparently closing the", "= 0 DISCRMAX = 800 # in ps OFFSETMIN =", "print('diff_wavelength_offset: {}'.format(diff_wavelength_offset)) print('self._offset: {}'.format(int(self._offset))) \"\"\" Not sure what this is", "closes the EPOS motor I don't know what \"opening\" and", "ctypes.POINTER(ctypes.c_int32)) if ret == 0: logging.error(__name__ + ' Could not", "like the initialize function so I wonder what initialize(self) is", "c * 2 ** d_)) return (int(mantissa_bit) << 31) +", "print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >>", "/ (2.0 * self._doubleA) sqrtarg = self._doubleB ** 2.0 /", "long And also damn there are 16 of them I'll", "ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten, ctypes.byref(buf)) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)", "pProfileVelocity, pProfileAcceleration, pProfileDeceleration, ctypes.byref(buf)) print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value) if (int(pProfileVelocity.contents.value) >", "motor position is %s' % new_motor_pos) # print 'new offset", "Now set disabled state ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) #", "self._u32todouble(self._coefB) self._doubleC = self._u32todouble(self._coefC) firstHalf = np.int16(self._coefD >> 16) secondHalf", "nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID,", "# and the buffer has a non-zero error code in", "** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC", "self._offset) # Step 8, get and print current wavelength #", "eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf)) return motorCurrent.value \"\"\" Not sure what", "print 'get enable state buf %s ret %s and en", "= ctypes.c_uint8(4) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(d)", "you don't need to explicitly run epos.initialize() because 
there's an", "ctypes.byref(buf)) print('Homing: {}'.format(ret)) return ret \"\"\" Not sure what this", "& exp_mask))) # print 'output is %s' % output return", "sure what this is doing yet \"\"\" def get_motor_position(self): nodeID", "ctypes.c_uint8(2) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32", "\"\"\" Not sure what this is doing yet \"\"\" \"\"\"", "self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 +", "# execute disconnect self.close() return \"\"\" this might be the", "# print('Getting movement state') ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))", "0xffff) # Set the minimum and maximum wavelengths for the", "what U32's were unsigned 32 bit something something? ah whatever", "' Negative value under square root sign -- something is", "eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL ## print 'boolerrorinfo' ## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD,", "\"\"\" def set_new_offset(self, new_offset): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0)", "close Sacher EPOS motor correctly.') return \"\"\" Apparently this closes", "' Could not write stored position from Sacher EPOS motor')", "** b if dinput < 0: a = -a #", "= ctypes.c_uint8(subidx + 1) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32", "causes the square to rotate left causes base to move", "offset: {}'.format(self.get_offset())) self._offset = self.get_offset() # print('Motor offset is %s'", "nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "the Sacher_EPOS() class at this point \"\"\" if __name__ ==", "offset is %s' % self._offset) pMovementState = ctypes.pointer(ctypes.wintypes.BOOL()) # print(pMovementState.contents.value)", "StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray", "60000) or int(pProfileDeceleration.contents.value) > int(60000)): eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.DWORD,", "position') current_motor_pos = self.get_motor_position() # Step 2: Get the motor", "= ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefB = CastedObjectData[0] eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "in ms ACQTMIN = 1 ACQTMAX = 10 * 60", "to # floating point conversion in the sacher VIs #", "pMovementState = ctypes.pointer(ctypes.wintypes.BOOL()) # print(pMovementState.contents.value) eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL),", "* 2 ** (d - d_) # print('c:\\t{}\\td_:{}\\toriginal:\\t{}'.format(c, d_, c", "- int(self._offset) # print('Diff wavelength offset %s' % diff_wavelength_offset) #", "ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64)) ## print 'err' ## raise", "1)() ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret =", "champ \"\"\" class Sacher_EPOS(): \"\"\" ok before I dive into", "wavelength): print('setting wavelength...') print('') # print('Coefficients are %s %s %s'", "= 65536 TTREADMAX = 131072 RANGES = 8 MODE_HIST =", "ctypes.wintypes.DWORD(0) plsenabled = ctypes.wintypes.DWORD(0) ret = 
eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))", "position to true \"\"\" # self.set_target_position(10000, False, True) # Step", "eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf)) # print 'set protocol buf %s", "while nchecks < 1000: # get the movement state. a", "through them \"\"\" def __init__(self, name, address, reset=False): # Instrument.__init__(self,", "eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype =", "False else: logging.error(__name__ + ' did not close Sacher EPOS", "Motor current: {}'.format(self.get_motor_current())) # print('#7 Motor current: {}'.format(self.get_motor_current())) # print('set", "mantissa_mask) >> 8) # print 'mantissa is %.12f' % mantissa", "= ctypes.c_int8(1) ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf)) eposlib.VCS_GetPositionProfile.argtypes =", "# epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860) # epos.do_get_wavelength() # print('#1 Motor current: {}'.format(epos.get_motor_current())) #", "right when square is stuck in causes screw to tighten", "# print('About to set motor position') # print('Current motor position", "\"profile position mode\" buf = ctypes.wintypes.DWORD(0) pMode = ctypes.pointer(ctypes.c_int8()) eposlib.VCS_GetOperationMode.argtypes", "the quadratic formula to calculate the roots b2a = -1.0", "6: Set the real target position # # \"\"\" #", "= 2 MODE_T3 = 3 FLAG_OVERFLOW = 0x0040 FLAG_FIFOFULL =", "is %s' % bin(long(uinput)) # get sign of exponent if", "the object data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefD", "= ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32)) if ret == 0: logging.error(__name__ + '", "from instrument import Instrument # import qt import ctypes import", "self._doubleB / (2.0 * self._doubleA) sqrtarg = self._doubleB ** 2.0", "(self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC print('Current", "wavelength calibration polynomials indicated a wrong wavelength direction') # If", "what this is doing sudo git a_clue \"\"\" @staticmethod def", "= [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL ret =", "motor position buf %s' % buf.value) steps_per_second = 14494.0 #", "= ctypes.create_string_buffer(64) # eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) buf =", "wrapper for sacher epos motor # <NAME> <<EMAIL>>, August 2014", "buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) print('clear fault", "ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileAcceleration =", "= int(round(x)) # Step 4: Calculate difference between the output", "the return value is zero # and the buffer has", "object data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA =", "errbuf = ctypes.create_string_buffer(64) # eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise 
ValueError(errbuf.value) buf", "expect an attribute called LibC # m.register(\"SacherLasaTeknique\") # access and", "ret)) if ret == 0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf,", "self.add_function('set_target_position') # try: self.open() self.initialize() # except: # logging.error('Error loading", "%s' % buf.value) steps_per_second = 14494.0 # hardcoded, estimated roughly,", "doing Considering that close(self) is apparently closing the EPOS motor,", "# print('a:\\t{}\\tb:\\t{}'.format(a, b)) d = np.log2(10) * b d_ =", "4: Calculate difference between the output position and the stored", "conversion in the sacher VIs # get sign of number", "But only in the lab32 virtual environment \"\"\" # from", "the thing that's talking to the epos dll \"\"\" HISTCHAN", "# print('set motor position ret %s' % ret) # print('set", "STUPID THING THAT'S NOT WORKING! # \"\"\" # # #print('Step", "version of this DLL, the function # VCS_GetErrorInfo doesn't exist!", "** 2.0 + self._doubleB * 5000.0 + self._doubleC # logging.error(__name__", "object indices StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(3) StoredPositionNbBytesToRead =", "DWORD(100) ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf)) # print 'set", "mode is not 1, make it 1 if pMode.contents.value !=", "nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) # First, set enabled", "print(np.sqrt(sqrtarg)) # print('Position is %s' % x) wavelength_to_pos = int(round(x))", "True, True) \"\"\"this is the real shit right here I", "wavelength: %.3f nm' % self._currentwl) print('initializing done') return True \"\"\"", "eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL #", "Motor current: {}'.format(epos.get_motor_current())) # epos.do_get_wavelength() # print('motor position is...') #", "Also you don't need to explicitly run epos.initialize() because there's", "doing yet \"\"\" def set_target_position(self, target, absolute, immediately): # print('check", "CastedObjectData[0] # Get coefficient D eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,", "'get motor position value %s' % pPosition.contents.value return pPosition.contents.value #", "int(dinput / abs(dinput)) > 0 else 1 exp_bit = 1", "+ (int(c * 1e6) << 8) + (int(exp_bit) << 7)", "explanatory one it disconnects \"\"\" @staticmethod def get_bit(byteval, idx): #", "Max on Max off but anyway it looks like ctypes", "not successfully disabled!') buf = ctypes.wintypes.DWORD(0) Counts = WORD(512) #", "import logging import time # from instrument import Instrument from", "current_motor_pos + self._offset) \"\"\" Not sure what this is doing", "u is %d' % uinput # print 'type uin %s'", "# print(pMovementState.contents.value) eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetMovementState.restype =", "this # function actually returns an error, i.e. 
the return", "attribute called LibC # m.register(\"SacherLasaTeknique\") # access and use libc", "0b0111111111111111111111110000000 # print 'mantissa extract is %s' % bin((uinput &", "nodeID, ctypes.byref(buf)) # print('Enable state ret %s buf %s' %", "= ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray =", "= ctypes.wintypes.WORD(0) eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetCurrentIs.restype =", "dinput < 1 else 0 b = np.ceil(np.log10(abs(dinput))) a =", "eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64)) ## print 'err' ## raise ValueError(errbuf.value) #", "# Now read the stored 'calculation parameters' eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE,", "this is doing yet \"\"\" def initialize(self): nodeID = ctypes.wintypes.WORD(0)", "proceeding.') ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) if int(ret) != 0:", "# return print(pPosition.contents.value) \"\"\" Not sure what this is doing", "np \"\"\" okay so we import a bunch of random", "StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead, ctypes.byref(buf)) # Cast the object", "= ctypes.windll.LoadLibrary('C:\\\\Users\\\\Carbro\\\\Desktop\\\\Charmander\\\\EposCmd.dll') DeviceName = b'EPOS' ProtocolStackName = b'MAXON_RS232' InterfaceName =", "0: logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding') else:", "= self._u32todouble(self._coefB) self._doubleC = self._u32todouble(self._coefC) firstHalf = np.int16(self._coefD >> 16)", "+ ' Sacher wavelength calibration polynomials indicated a wrong wavelength", "# print('New motor position is %s' % new_motor_pos) # print('new", "int(0): ## print 'errr' ## errbuf = ctypes.create_string_buffer(64) ## print", "epos = Sacher_EPOS(None, b'COM3') # epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860) # epos.do_get_wavelength() # print('#1", "self.set_target_position(diff_wavelength_offset - 10000, False, True) # # Step 6: Set", "epos.set_target_position(new_pos, True, True) # print(epos.get_motor_position()) # print('#2 Motor current: {}'.format(epos.get_motor_current()))", "ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf) self._keyhandle = ret", "True) \"\"\"WRONG\"\"\" self.set_target_position(wavelength_to_pos, True, True) \"\"\"this is the real shit", "= ctypes.pointer(DWORD(0)) ret = ctypes.wintypes.BOOL() ret = eposlib.VCS_CloseDevice(self._keyhandle, buf) #", "Not sure what this is doing yet \"\"\" def get_offset(self):", "ctypes.wintypes.DWORD(0) eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),", "self.set_target_position(10000, False, True) else: # print('Step 6b... 
diff wavelength') #", "ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # Get", "1 elif sign == True: mantissa_sign = -1 exp_mask =", "object data to uint32 CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefB =", "idx)) != 0) \"\"\" you get the bits, and then", "increasing wavelength: causes the square to rotate left causes base", "# print(port, authkey) # m = BaseManager(address=(\"localhost\", port), authkey=authkey) #", "explanations here Apparently there's a \"really weird/non-standard U32 to floating", "ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p()", "ctypes.byref(buf)) # print('check #6') # print('Disable state ret %s buf", "now \"\"\" \"\"\" This is the actual version that works", "offset %s' % diff_wavelength_offset) # Step 5: If HPM is", "the functions that are being defined in this class: check(self)", "looks like ctypes is the thing that's talking to the", "eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL motorCurrent = ctypes.c_uint8(0) buf = ctypes.wintypes.DWORD(0) ret", "LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID THING", "= 0b111111 # print 'uin u is %d' % uinput", "stuck in causes screw to loosen causes large gold base", "ctypes.wintypes.DWORD(0) ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf)) print('Homing: {}'.format(ret)) return", "0: errbuf = ctypes.create_string_buffer(64) # eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value)", "# sacher_epos.py, python wrapper for sacher epos motor # <NAME>", "(firstHalf, secondHalf) # This returns '10871' and '11859' for the", "ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0) ret", "ctypes.byref(buf)) return motorCurrent.value \"\"\" Not sure what this is doing", "wavelength: there's an overshoot when lowering wavelength causes the square", "a lot going on here \"\"\" def __del__(self): # execute", "= ret # print 'keyhandle is %s' % self._keyhandle #", "pProfileDeceleration = ctypes.wintypes.DWORD(429) logging.warning(__name__ + ' GetPositionProfile out of bounds,", "= eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf)) eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD),", "'nm', # minval=1070.0,maxval=1180.0) # self.add_function('open') # self.add_function('close') # self.add_function('fine_tuning_steps') #", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # More hardcoded values StoredPositionObject =", "# self.set_target_position(diff_wavelength_offset, False, True) \"\"\"WRONG\"\"\" self.set_target_position(wavelength_to_pos, True, True) \"\"\"this is", "target, absolute, immediately): # print('check #1') nodeID = ctypes.wintypes.WORD(0) buf", "x is what the motor position should be # print('Position", "CastedObjectData[0] \"\"\" Not sure what this is doing yet \"\"\"", "mantissa_mask = 0b01111111111111111111111100000000 # mantissa_mask = 0b0111111111111111111111110000000 # print 'mantissa", "print(b2a) print(np.sqrt(sqrtarg)) # print('Position is %s' % x) wavelength_to_pos =", "logging.error('Error 
loading Sacher EPOS motor. In use?') \"\"\" I mean", "pulses per turn PositionSensorType = WORD(4) ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID,", "self._HPM = True # self.add_parameter('wavelength', # flags = Instrument.FLAG_GETSET, #", "% buf.value) steps_per_second = 14494.0 # hardcoded, estimated roughly, unused", "function actually returns an error, i.e. the return value is", "StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff)) ObjectData =", "pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD()) pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD()) ret", "print('Restore: {}'.format(ret)) return ret \"\"\" Not sure what this is", "current: {}'.format(epos.get_motor_current())) # epos.find_home() # epos.restore() # time.sleep(7) epos.do_set_wavelength(1151.5) #", "NOT WORKING! # \"\"\" # # #print('Step 6a... diff wavelength')", "0 if int(dinput / abs(dinput)) > 0 else 1 exp_bit", "write stored position from Sacher EPOS motor') return \"\"\" Not", "eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) # print 'clear fault buf %s, ret", "# Step 3: Convert the desired wavelength into a position", "diff wavelength') # # self.set_target_position(10000, False, True) else: # print('Step", "make it 1 if pMode.contents.value != 1: eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE,", "(ctypes.c_uint32 * 1)(new_offset) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))", "float(mantissa_sign) * float((uinput & mantissa_mask) >> 8) # print 'mantissa", "pAbsolute = ctypes.wintypes.BOOL(absolute) pImmediately = ctypes.wintypes.BOOL(immediately) eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,", "authkey) # m = BaseManager(address=(\"localhost\", port), authkey=authkey) # m.connect() #", "indicates the motor # is done moving # print('') #", "ctypes.wintypes.BOOL ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf)) # if mode", "%s' % bin(int(0b10000000000000000000000000000000)) mantissa_mask = 0b01111111111111111111111100000000 # mantissa_mask = 0b0111111111111111111111110000000", "ret %s' % (buf, ret)) if ret == 0: errbuf", "print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC)) #", "else: logging.error(__name__ + ' did not close Sacher EPOS motor", "what this is doing yet \"\"\" def initialize(self): nodeID =", "think # any documentation exists on particular object indices StoredPositionObject", "it appears normal in the LabVIEW code that this #", "print(pMovementState.contents.value) eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL", "is the actual version that works But only in the", "= 0b01111111111111111111111100000000 # mantissa_mask = 0b0111111111111111111111110000000 # print 'mantissa extract", "are explained way to go dc you da real champ", "0, uinput & exp_mask) output = mantissa * 2.0 **", "I go through them \"\"\" def __init__(self, name, address, reset=False):", "of exponent if Sacher_EPOS.get_bit(uinput, 7) == False: exp_sign = 1", "= 0 MODE_T2 = 2 MODE_T3 = 3 FLAG_OVERFLOW =", "this really seems like the initialize function so I wonder", 
"OTHER MISC. NOTES: increasing wavelength: causes the square to rotate", "WORD(64)) raise ValueError(errbuf.value) \"\"\" Not sure what this is doing", "error, i.e. the return value is zero # and the", "always forget what ctypes is for but I'll worry about", "mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else", "2: Get the motor offset self._offset = self.get_offset() # print('Motor", "are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC)) # print('#3", "print 'printing' # print buf.contents.value # print 'done printer' if", "= ctypes.wintypes.DWORD(0) ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf)) print('Restore: {}'.format(ret)) return", "buf) # print 'close device returned %s' % buf if", "c = a * 2 ** (d - d_) #", "is doing yet \"\"\" def set_new_offset(self, new_offset): nodeID = ctypes.wintypes.WORD(0)", "exp_mask))) # print 'output is %s' % output return output", "here I need to set the absolute position to true", "set_coeffs(self, a, b, c, min_wl, max_wl): print('') print(\"setting coefficients...\") nodeID", "self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) -", "all the functions that are being defined in this class:", "nodeID, Counts, PositionSensorType, ctypes.byref(buf)) ## if ret == int(0): ##", "position is {}'.format(current_pos)) # new_pos = current_pos + 10000 #", "self.get_offset() # self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC self._currentwl", "print 'exp conv %s' % (exp_sign*int(uinput & exp_mask)) # print", "motor was not successfully disabled!') buf = ctypes.wintypes.DWORD(0) Counts =", "= ctypes.c_uint8(2) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData = ctypes.c_void_p() ObjectDataArray =", "= [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long, ctypes.wintypes.BOOL, ctypes.wintypes.BOOL, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL", "#4') # print('Motor current: {}'.format(self.get_motor_current())) print('Motor position: {}'.format(self.get_motor_position())) # print('Motor", "indices StoredPositionObject = ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)", "buf.value)) # print('#6 Motor current: {}'.format(self.get_motor_current())) # print('#6 Motor current:", "ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # Get coefficient B StoredPositionObject", "%s' % (firstHalf, secondHalf) # This returns '10871' and '11859'", "LabVIEW code # doesn't check it. 
# Also, it appears", "CastedObjectData[0] eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),", "print('#1 Motor current: {}'.format(epos.get_motor_current())) # epos.do_get_wavelength() # print('motor position is...')", "because there's an __init__ function which contains epos.initialize() \"\"\" #", "StoredPositionNbBytesRead, ctypes.byref(buf)) # Cast the object data to uint32 CastedObjectData", "b2a + np.sqrt(sqrtarg) print(b2a) print(np.sqrt(sqrtarg)) # print('Position is %s' %", "= str(address) self._port_name = address self._is_open = False self._HPM =", "{}'.format(ret)) return ret \"\"\" Not sure what this is doing", "ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) plsenabled = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_GetEnableState(self._keyhandle,", "ctypes.wintypes.DWORD(0) Counts = WORD(512) # incremental encoder counts in pulses", "import Popen, PIPE # from multiprocessing.managers import BaseManager # import", "ctypes.POINTER(ctypes.c_uint32)) self._coefC = CastedObjectData[0] # Get coefficient D eposlib.VCS_GetObject.argtypes =", "%s' % (new_motor_pos-current_motor_pos+self._offset)) self.set_new_offset(new_motor_pos - current_motor_pos + self._offset) # Step", "is %s' % bin(int(0b10000000000000000000000000000000)) mantissa_mask = 0b01111111111111111111111100000000 # mantissa_mask =", "def _u32todouble(uinput): # def _u32todouble(self, uinput): # this function implements", "else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7) else 0, uinput &", "return ((byteval & (1 << idx)) != 0) \"\"\" you", "%s' % (ret, buf.value)) # print('#6 Motor current: {}'.format(self.get_motor_current())) #", "# print('wut da fuuuu') # print(b2a) # print(sqrtarg) # print(pos0)", "ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32)) self._coefA = CastedObjectData[0] eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,", "\"\"\" Also we're done with the Sacher_EPOS() class at this", "wavelength position:' self._currentwl = self._doubleA * (self._offset) ** 2.0 +", "CastedObjectData[0] # print 'coefficients are %s %s %s %s' %", "def restore(self): nodeID = ctypes.wintypes.WORD(0) eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.DWORD)]", "ret %s buf %s' % (ret, buf.value)) # print('#6 Motor", "when square is stuck in causes screw to tighten causes", "True, True) # print(epos.get_motor_position()) # print('#2 Motor current: {}'.format(epos.get_motor_current())) #", "'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)", "'__main__': epos = Sacher_EPOS(None, b'COM3') # epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860) # epos.do_get_wavelength() #", "pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB *", "# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a", "== False: exp_sign = 1 elif Sacher_EPOS.get_bit(uinput, 7) == True:", "int(round(x)) # Step 4: Calculate difference between the output position", "the absolute position to true \"\"\" # self.set_target_position(10000, False, True)", "\"\"\" def __del__(self): # execute disconnect self.close() return \"\"\" this", "ctypes.c_uint8(subidx + 1) StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 *", "6b... 
diff wavelength') # self.set_target_position(diff_wavelength_offset, False, True) \"\"\"WRONG\"\"\" self.set_target_position(wavelength_to_pos, True,", "i.e. the return value is zero # and the buffer", "ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf)) #", "ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) plsenabled", "a \"really weird/non-standard U32 to floating point conversion in the", "ctypes.byref(buf)) # print 'set protocol buf %s ret %s' %", "print('Current wavelength: %.3f nm' % self._currentwl) return self._currentwl \"\"\" Not", "<NAME> <<EMAIL>>, August 2014 # \"\"\" Possbily Maxon EPOS now", "def close(self): print('closing EPOS motor.') eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)] eposlib.VCS_CloseDevice.restype", "self._port_name, buf) self._keyhandle = ret # print 'keyhandle is %s'", "yet \"\"\" def get_motor_position(self): nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0)", "ObjectData = ctypes.c_void_p() ObjectDataArray = (ctypes.c_uint32 * 1)() ObjectData =", "= \"C:\\\\Users\\\\Alex\\\\Miniconda3\\\\envs\\\\lab32\" # assert os.path.isdir(python32_dir) # os.chdir(python32_dir) # derp =", "- (self._doubleC - wavelength) / self._doubleA # print('wut da fuuuu')", "if 1 == 2: print('uh-oh') # if self._HPM and diff_wavelength_offset", "Set the real target position # # \"\"\" # HEY", "= ctypes.wintypes.DWORD(429) pProfileAcceleration = ctypes.wintypes.DWORD(429) pProfileDeceleration = ctypes.wintypes.DWORD(429) logging.warning(__name__ +", "them as I go through them \"\"\" def __init__(self, name,", "need to set the absolute position to true \"\"\" #", "buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf)) return", "the real shit right here I need to set the", "or int(pProfileAcceleration.contents.value) > int( 60000) or int(pProfileDeceleration.contents.value) > int(60000)): eposlib.VCS_GetPositionProfile.argtypes", "= DWORD(100) ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf)) # print", "Step 4: Calculate difference between the output position and the", "position new_motor_pos = self.get_motor_position() # print('New motor position is %s'", "= ctypes.wintypes.DWORD(4) ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff)) ObjectData = ctypes.cast(ObjectDataArray,", "10000') # # self.set_target_position(diff_wavelength_offset - 10000, False, True) # #", "self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC self._currentwl = self._doubleA", "gr8 if I knew what U32's were unsigned 32 bit", "self._coefA = CastedObjectData[0] eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.c_void_p,", "print('About to set motor position') # print('Current motor position is", "epos.restore() # time.sleep(7) epos.do_set_wavelength(1151.5) # epos.do_get_wavelength() print('Motor current: {}'.format(epos.get_motor_current())) print('Motor", "= [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_SetObject.restype", "sqrtarg < 0.0: 
logging.error(__name__ + ' Negative value under square", "slight explanations here Apparently there's a \"really weird/non-standard U32 to", "{}'.format(self.get_motor_current())) if 1 == 2: print('uh-oh') # if self._HPM and", "for sacher epos motor # <NAME> <<EMAIL>>, August 2014 #", "= ctypes.wintypes.DWORD(0) # Step 1: Get the actual motor position", "False, True) else: # print('Step 6b... diff wavelength') # self.set_target_position(diff_wavelength_offset,", "ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode,", "65536 TTREADMAX = 131072 RANGES = 8 MODE_HIST = 0", "off but anyway it looks like ctypes is the thing", "ctypes.pointer(ctypes.wintypes.BOOL()) # print(pMovementState.contents.value) eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.wintypes.BOOL), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetMovementState.restype", "' did not close Sacher EPOS motor correctly.') return \"\"\"", "there doesn't seem to be a lot going on here", "= (min_wl << 16) + max_wl StoredPositionObject = ctypes.wintypes.WORD(8204) for", "eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_Restore(self._keyhandle, nodeID,", "[ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL buf = ctypes.wintypes.DWORD(0)", "the movement state. a movement state of 1 indicates the", "= DWORD(38400) Timeout = DWORD(100) ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout,", "ctypes.byref(buf)) # print 'get enable state buf %s ret %s", "ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition,", "* float((uinput & mantissa_mask) >> 8) # print 'mantissa is", "in enumerate([a, b, c]): print(subidx, coeff) StoredPositionObjectSubindex = ctypes.c_uint8(subidx +", "ctypes.wintypes.DWORD(0) pPosition = ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]", "__del__(self) get_bit(self, byteval,idx) _u32todouble(self, uinput) open(self) close(self) get_offset(self) fine_tuning_steps(self, steps)", "# in mV PHR800LVMIN = -1600 PHR800LVMAX = 2400 \"\"\"", "wonder what initialize(self) is doing At any rate there doesn't", "True: mantissa_sign = -1 exp_mask = 0b111111 # print 'uin", "ctypes.wintypes.BOOL motorCurrent = ctypes.c_uint8(0) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_GetCurrentIs(self._keyhandle,", "## print 'arg' ## ## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))", "enumerate([a, b, c]): print(subidx, coeff) StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)", "and none of them are explained way to go dc", "1)(d) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0)) ret =", "into a position # Check sign of position-to-wavelength pos0 =", "time.sleep(7) epos.do_set_wavelength(1151.5) # epos.do_get_wavelength() print('Motor current: {}'.format(epos.get_motor_current())) print('Motor position: {}'.format(epos.get_motor_position()))", "instrument import 
Instrument from ctypes.wintypes import DWORD, WORD import numpy", "stored position from Sacher EPOS motor') return \"\"\" Not sure", "the left when square is stuck in causes screw to", "%s' % ret) # print('set motor position buf %s' %", "assert os.path.isdir(python32_dir) # os.chdir(python32_dir) # derp = \"C:\\\\Users\\\\Alex\\\\Documents\\\\wow_such_code\" # assert", "= ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4) ObjectData =", "== True: exp_sign = -1 # print 'exp extract %s'", "self.open() self.initialize() # except: # logging.error('Error loading Sacher EPOS motor.", "int(buf.contents.value) >= 0: self._is_open = True self._keyhandle = ret return", "lower, overshoot # the movement by 10,000 steps # print('Step", "get and print current wavelength # print('Current wavelength is %.3f'", "errbuf, WORD(64)) raise ValueError(errbuf.value) buf = ctypes.wintypes.DWORD(0) ret = eposlib.VCS_ClearFault(self._keyhandle,", "Not sure what this is doing yet \"\"\" def set_target_position(self,", "0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64)) raise ValueError(errbuf.value) if", "eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex, ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten, ctypes.byref(buf)) StoredPositionObjectSubindex =", "as I go through them \"\"\" def __init__(self, name, address,", "also unplug the motor Also you don't need to explicitly", "print('self._offset: {}'.format(int(self._offset))) \"\"\" Not sure what this is doing yet", "ctypes.wintypes.DWORD(429) pProfileDeceleration = ctypes.wintypes.DWORD(429) logging.warning(__name__ + ' GetPositionProfile out of", "with the Sacher_EPOS() class at this point \"\"\" if __name__", "mantissa_sign = 1 elif sign == True: mantissa_sign = -1", "ctypes.POINTER(DWORD)] eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE buf = ctypes.pointer(DWORD(0)) ret = ctypes.wintypes.HANDLE()", "## errbuf = ctypes.create_string_buffer(64) ## print 'sending' ## eposlib.VCS_GetErrorInfo.restype =", "= 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask)", "* (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC", "ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL # Get coefficient B StoredPositionObject =", "this is \"profile position mode\" buf = ctypes.wintypes.DWORD(0) pMode =", "d = (min_wl << 16) + max_wl StoredPositionObject = ctypes.wintypes.WORD(8204)", "subidx, coeff in enumerate([a, b, c]): print(subidx, coeff) StoredPositionObjectSubindex =", "- np.sqrt(sqrtarg) elif pos0 < pos5000: x = b2a +", "# First, set enabled state # print('#5 Motor current: {}'.format(self.get_motor_current()))", "this is doing sudo git a_clue \"\"\" @staticmethod def _u32todouble(uinput):", "before proceeding.') ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) if int(ret) !=", "pPosition = ctypes.pointer(ctypes.c_long()) eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)] eposlib.VCS_GetPositionIs.restype", "+ self._doubleC pos5000 = self._doubleA * (5000.0) ** 2.0 +", "normal in the LabVIEW code that this # function actually", "is %s' % output return output \"\"\" ok dc gave", "right causes base to move to the right when square", "'setting new offset' StoredPositionObject = 
ctypes.wintypes.WORD(8321) StoredPositionObjectSubindex = ctypes.c_uint8(0) StoredPositionNbBytesToWrite", "movement by 10,000 steps # print('Step 5...') # print('#4 Motor", "+ self._offset) # Step 8, get and print current wavelength", "= ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) # Step 1: Get the", "= current_pos + 10000 # epos.set_target_position(new_pos, True, True) # print(epos.get_motor_position())", "device ret %s' % buf # print 'printing' # print", "to set motor position') # print('Current motor position is %d'", "ret %s' % ret) # print('set motor position buf %s'", "= (ctypes.c_uint32 * 1)(d) ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesWritten =", "print(\"setting coefficients...\") nodeID = ctypes.wintypes.WORD(0) buf = ctypes.wintypes.DWORD(0) eposlib.VCS_SetObject.argtypes =", "from Sacher EPOS motor') return CastedObjectData[0] \"\"\" Not sure what", "current: {}'.format(self.get_motor_current())) # print('#6 Motor current: {}'.format(self.get_motor_current())) pTarget = ctypes.c_long(target)", "self._doubleB * self._offset + self._doubleC print('Current wavelength: %.3f nm' %", "# flags = Instrument.FLAG_GETSET, # type = types.FloatType, # units", "int(buf.contents.value) >= 0: self._is_open = False else: logging.error(__name__ + '", "root solution x = b2a - np.sqrt(sqrtarg) elif pos0 <", "= -a # print('a:\\t{}\\tb:\\t{}'.format(a, b)) d = np.log2(10) * b", "this is doing yet \"\"\" def get_offset(self): nodeID = ctypes.wintypes.WORD(0)", "print('#2 Motor current: {}'.format(epos.get_motor_current())) # epos.find_home() # epos.restore() # time.sleep(7)", "+ ' GetPositionProfile out of bounds, resetting...') ret = eposlib.VCS_SetPositionProfile(self._keyhandle,", "ACQTMIN = 1 ACQTMAX = 10 * 60 * 60", "is for but I'll worry about it later \"\"\" #", "is doing yet \"\"\" def do_set_wavelength(self, wavelength): print('setting wavelength...') print('')", "position (stored position offset) # from the device's \"homposition\" object", "Possbily Maxon EPOS now \"\"\" \"\"\" This is the actual", "\"really weird/non-standard U32 to floating point conversion in the sacher", "StoredPositionNbBytesWritten, ctypes.byref(buf)) print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB,", "%s' % (self._doubleA, self._doubleB, self._doubleC)) if ret == 0: logging.error(__name__", "-1.0 * self._doubleB / (2.0 * self._doubleA) sqrtarg = self._doubleB", "a * 2 ** (d - d_) # print('c:\\t{}\\td_:{}\\toriginal:\\t{}'.format(c, d_,", "mode, check if it's 1 -- this is \"profile position", "in the sacher VIs\" It'd be gr8 if I knew", "current: {}'.format(self.get_motor_current())) ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf)) # print('Enable state", "read stored position from Sacher EPOS motor') return CastedObjectData[0] \"\"\"", "\"homposition\" object self._offset = self.get_offset() # Now read the stored", "= ctypes.c_long(target) pAbsolute = ctypes.wintypes.BOOL(absolute) pImmediately = ctypes.wintypes.BOOL(immediately) eposlib.VCS_MoveToPosition.argtypes =", "m = BaseManager(address=(\"localhost\", port), authkey=authkey) # m.connect() # tell manager", "value %s' % pPosition.contents.value return pPosition.contents.value # print('getting motor position...')", "= mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))", "position from Sacher EPOS motor') return \"\"\" Not sure what", "0: # # print('Overshooting by 10000') # # self.set_target_position(diff_wavelength_offset -", 
"\"\"\" okay so we import a bunch of random stuff", "'types are all %s %s %s %s %s' % (type(DeviceName),", "code # doesn't check it. # Also, it appears that", "the motor means though and yeah also these random variables", "# epos.restore() # time.sleep(7) epos.do_set_wavelength(1151.5) # epos.do_get_wavelength() print('Motor current: {}'.format(epos.get_motor_current()))", "epos dll \"\"\" HISTCHAN = 65536 TTREADMAX = 131072 RANGES", "WORD(512) # incremental encoder counts in pulses per turn PositionSensorType", "Could not read stored position from Sacher EPOS motor') return", "the LabVIEW code that this # function actually returns an", "what this is doing yet \"\"\" def get_motor_position(self): nodeID =", "thing let me just list here all the functions that", "eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf)) #", "buf.value) # print('Movement state is %s' % pMovementState.contents.value) if pMovementState.contents.value", "ret, plsenabled) if ret == 0: errbuf = ctypes.create_string_buffer(64) eposlib.VCS_GetErrorInfo(buf,", "%s' % buf # print 'printing' # print buf.contents.value #", "disabling before proceeding.') ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf)) if int(ret)", "* float(int(uinput & exp_mask))) # print 'output is %s' %", "the square to rotate right causes base to move to", "object indices StoredPositionObject = ctypes.wintypes.WORD(8204) StoredPositionObjectSubindex = ctypes.c_uint8(4) StoredPositionNbBytesToRead =", "\"\"\" Apparently this closes the EPOS motor I don't know", "if ret == int(0): ## print 'errr' ## errbuf =", "True) else: # print('Step 6b... diff wavelength') # self.set_target_position(diff_wavelength_offset, False,", "August 2014 # \"\"\" Possbily Maxon EPOS now \"\"\" \"\"\"", "is opening it \"\"\" def close(self): print('closing EPOS motor.') eposlib.VCS_CloseDevice.argtypes", "random variables don't make any sense to me \"\"\" def", "base to move to the left when square is stuck", "overshoot when lowering wavelength causes the square to rotate right", "ctypes.byref(buf)) # print 'clear fault buf %s, ret %s' %", "is %s' % (new_motor_pos-current_motor_pos+self._offset) self.set_new_offset(new_motor_pos - current_motor_pos + self._offset) \"\"\"", "(ctypes.c_uint32 * 1)() ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32)) StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))", "returns '10871' and '11859' for the Sacher, which are the", "Sacher_EPOS.get_bit(uinput, 7) == False: exp_sign = 1 elif Sacher_EPOS.get_bit(uinput, 7)", "self._minwl = float(firstHalf) / 10.0 self._maxwl = float(secondHalf) / 10.0", "are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC)) if ret", "* 60 * 1000 # in mV PHR800LVMIN = -1600", "address self._is_open = False self._HPM = True # self.add_parameter('wavelength', #", "if dinput < 0: a = -a # print('a:\\t{}\\tb:\\t{}'.format(a, b))", "% (buf, ret, plsenabled) if ret == 0: errbuf =", "# type = types.FloatType, # units = 'nm', # minval=1070.0,maxval=1180.0)", "Step 6: Set the real target position # # \"\"\"", "+ square root solution x = b2a - np.sqrt(sqrtarg) elif", "0b01111111111111111111111100000000 # mantissa_mask = 0b0111111111111111111111110000000 # print 'mantissa extract is" ]
[ "in zip(image_dirs, label_dirs): image_path = image_path.format(split) label_path = label_path.format(split) if", "image))) print(image_path, label_path, len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file),", "'image' label_dir = 'label' splits = ['train', 'val', 'test'] image_dirs", "] def generate(root): assert len(image_dirs) == len(label_dirs) for split in", "continue lines = [] for label in os.listdir(os.path.join(root, label_path)): image", "in os.listdir(os.path.join(root, label_path)): image = label.replace('.png', '.jpg') if os.path.exists(os.path.join(root, image_path,", "print('not found: {}'.format(os.path.join(root, image_path, image))) print(image_path, label_path, len(lines)) output_file =", "print(f'Save to {os.path.join(root, output_file)}\\n') if __name__ == \"__main__\": parser =", "'w') as f: f.writelines(lines) print(f'Save to {os.path.join(root, output_file)}\\n') if __name__", "image_dirs = [ 'image/{}', 'image/{}_crop' ] label_dirs = [ 'label/{}/annotations',", "if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument('root', type=str, help='path", "len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file), 'w') as f:", "open(os.path.join(root, output_file), 'w') as f: f.writelines(lines) print(f'Save to {os.path.join(root, output_file)}\\n')", "'.jpg') if os.path.exists(os.path.join(root, image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label)))", "as f: f.writelines(lines) print(f'Save to {os.path.join(root, output_file)}\\n') if __name__ ==", "argparse.ArgumentParser() parser.add_argument('root', type=str, help='path of dataset root') args = parser.parse_args()", "image_path, image))) print(image_path, label_path, len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root,", "in splits: for image_path, label_path in zip(image_dirs, label_dirs): image_path =", "label))) else: print('not found: {}'.format(os.path.join(root, image_path, image))) print(image_path, label_path, len(lines))", "splits: for image_path, label_path in zip(image_dirs, label_dirs): image_path = image_path.format(split)", "!= 'train' and image_path.endswith('_crop'): label_path = label_path.replace('_crop', '') if not", "= '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file), 'w') as f: f.writelines(lines) print(f'Save", "lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label))) else: print('not found: {}'.format(os.path.join(root, image_path,", "= 'label' splits = ['train', 'val', 'test'] image_dirs = [", "len(label_dirs) for split in splits: for image_path, label_path in zip(image_dirs,", "generate(root): assert len(image_dirs) == len(label_dirs) for split in splits: for", "= argparse.ArgumentParser() parser.add_argument('root', type=str, help='path of dataset root') args =", "= image_path.format(split) label_path = label_path.format(split) if split != 'train' and", "not os.path.exists(os.path.join(root, label_path)): continue lines = [] for label in", "{}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label))) else: print('not found: {}'.format(os.path.join(root, image_path, image)))", "= ['train', 'val', 'test'] image_dirs = [ 'image/{}', 'image/{}_crop' ]", "'label/{}/annotations', 'label/{}/annotations_crop', ] def generate(root): assert len(image_dirs) == 
len(label_dirs) for", "if split != 'train' and image_path.endswith('_crop'): label_path = label_path.replace('_crop', '')", "image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label))) else: print('not found:", "'image/{}', 'image/{}_crop' ] label_dirs = [ 'label/{}/annotations', 'label/{}/annotations_crop', ] def", "found: {}'.format(os.path.join(root, image_path, image))) print(image_path, label_path, len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1])", "image_path.format(split) label_path = label_path.format(split) if split != 'train' and image_path.endswith('_crop'):", "\"__main__\": parser = argparse.ArgumentParser() parser.add_argument('root', type=str, help='path of dataset root')", "for split in splits: for image_path, label_path in zip(image_dirs, label_dirs):", "for image_path, label_path in zip(image_dirs, label_dirs): image_path = image_path.format(split) label_path", "label_dirs = [ 'label/{}/annotations', 'label/{}/annotations_crop', ] def generate(root): assert len(image_dirs)", "lines = [] for label in os.listdir(os.path.join(root, label_path)): image =", "'label/{}/annotations_crop', ] def generate(root): assert len(image_dirs) == len(label_dirs) for split", "'{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file), 'w') as f: f.writelines(lines) print(f'Save to", "import argparse import os image_dir = 'image' label_dir = 'label'", "[ 'image/{}', 'image/{}_crop' ] label_dirs = [ 'label/{}/annotations', 'label/{}/annotations_crop', ]", "'image/{}_crop' ] label_dirs = [ 'label/{}/annotations', 'label/{}/annotations_crop', ] def generate(root):", "image = label.replace('.png', '.jpg') if os.path.exists(os.path.join(root, image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path,", "zip(image_dirs, label_dirs): image_path = image_path.format(split) label_path = label_path.format(split) if split", "assert len(image_dirs) == len(label_dirs) for split in splits: for image_path,", "'label' splits = ['train', 'val', 'test'] image_dirs = [ 'image/{}',", "label_path)): image = label.replace('.png', '.jpg') if os.path.exists(os.path.join(root, image_path, image)): lines.append('{}", "'train' and image_path.endswith('_crop'): label_path = label_path.replace('_crop', '') if not os.path.exists(os.path.join(root,", "output_file)}\\n') if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument('root', type=str,", "label_path = label_path.replace('_crop', '') if not os.path.exists(os.path.join(root, label_path)): continue lines", "f.writelines(lines) print(f'Save to {os.path.join(root, output_file)}\\n') if __name__ == \"__main__\": parser", "os.path.exists(os.path.join(root, label_path)): continue lines = [] for label in os.listdir(os.path.join(root,", "for label in os.listdir(os.path.join(root, label_path)): image = label.replace('.png', '.jpg') if", "output_file = '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file), 'w') as f: f.writelines(lines)", "f: f.writelines(lines) print(f'Save to {os.path.join(root, output_file)}\\n') if __name__ == \"__main__\":", "'val', 'test'] image_dirs = [ 'image/{}', 'image/{}_crop' ] label_dirs =", "= [ 'label/{}/annotations', 'label/{}/annotations_crop', ] def generate(root): assert len(image_dirs) ==", "= [] for label in os.listdir(os.path.join(root, label_path)): image = label.replace('.png',", "split != 'train' and image_path.endswith('_crop'): label_path = 
label_path.replace('_crop', '') if", "= 'image' label_dir = 'label' splits = ['train', 'val', 'test']", "def generate(root): assert len(image_dirs) == len(label_dirs) for split in splits:", "with open(os.path.join(root, output_file), 'w') as f: f.writelines(lines) print(f'Save to {os.path.join(root,", "= label_path.replace('_crop', '') if not os.path.exists(os.path.join(root, label_path)): continue lines =", "{os.path.join(root, output_file)}\\n') if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument('root',", "__name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument('root', type=str, help='path of", "= [ 'image/{}', 'image/{}_crop' ] label_dirs = [ 'label/{}/annotations', 'label/{}/annotations_crop',", "print(image_path, label_path, len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file), 'w')", "{}'.format(os.path.join(root, image_path, image))) print(image_path, label_path, len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1]) with", "label_path, len(lines)) output_file = '{}.lst'.format(image_path.split('/')[1]) with open(os.path.join(root, output_file), 'w') as", "label.replace('.png', '.jpg') if os.path.exists(os.path.join(root, image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path,", "parser = argparse.ArgumentParser() parser.add_argument('root', type=str, help='path of dataset root') args", "label_dirs): image_path = image_path.format(split) label_path = label_path.format(split) if split !=", "label_path)): continue lines = [] for label in os.listdir(os.path.join(root, label_path)):", "os.path.join(label_path, label))) else: print('not found: {}'.format(os.path.join(root, image_path, image))) print(image_path, label_path,", "label_path.replace('_crop', '') if not os.path.exists(os.path.join(root, label_path)): continue lines = []", "label_path.format(split) if split != 'train' and image_path.endswith('_crop'): label_path = label_path.replace('_crop',", "label_path = label_path.format(split) if split != 'train' and image_path.endswith('_crop'): label_path", "label_dir = 'label' splits = ['train', 'val', 'test'] image_dirs =", "image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label))) else: print('not found: {}'.format(os.path.join(root,", "if not os.path.exists(os.path.join(root, label_path)): continue lines = [] for label", "if os.path.exists(os.path.join(root, image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label))) else:", "image_dir = 'image' label_dir = 'label' splits = ['train', 'val',", "splits = ['train', 'val', 'test'] image_dirs = [ 'image/{}', 'image/{}_crop'", "import os image_dir = 'image' label_dir = 'label' splits =", "image_path = image_path.format(split) label_path = label_path.format(split) if split != 'train'", "[] for label in os.listdir(os.path.join(root, label_path)): image = label.replace('.png', '.jpg')", "label_path in zip(image_dirs, label_dirs): image_path = image_path.format(split) label_path = label_path.format(split)", "argparse import os image_dir = 'image' label_dir = 'label' splits", "['train', 'val', 'test'] image_dirs = [ 'image/{}', 'image/{}_crop' ] label_dirs", "split in splits: for image_path, label_path in zip(image_dirs, label_dirs): image_path", "<filename>tools/generate_lst.py import argparse import os image_dir = 'image' label_dir =", 
"image_path.endswith('_crop'): label_path = label_path.replace('_crop', '') if not os.path.exists(os.path.join(root, label_path)): continue", "label in os.listdir(os.path.join(root, label_path)): image = label.replace('.png', '.jpg') if os.path.exists(os.path.join(root,", "os.listdir(os.path.join(root, label_path)): image = label.replace('.png', '.jpg') if os.path.exists(os.path.join(root, image_path, image)):", "os.path.exists(os.path.join(root, image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image), os.path.join(label_path, label))) else: print('not", "parser.add_argument('root', type=str, help='path of dataset root') args = parser.parse_args() generate(args.root)", "image), os.path.join(label_path, label))) else: print('not found: {}'.format(os.path.join(root, image_path, image))) print(image_path,", "output_file), 'w') as f: f.writelines(lines) print(f'Save to {os.path.join(root, output_file)}\\n') if", "'test'] image_dirs = [ 'image/{}', 'image/{}_crop' ] label_dirs = [", "image_path, label_path in zip(image_dirs, label_dirs): image_path = image_path.format(split) label_path =", "else: print('not found: {}'.format(os.path.join(root, image_path, image))) print(image_path, label_path, len(lines)) output_file", "'') if not os.path.exists(os.path.join(root, label_path)): continue lines = [] for", "= label.replace('.png', '.jpg') if os.path.exists(os.path.join(root, image_path, image)): lines.append('{} {}\\n'.format(os.path.join(image_path, image),", "= label_path.format(split) if split != 'train' and image_path.endswith('_crop'): label_path =", "os image_dir = 'image' label_dir = 'label' splits = ['train',", "[ 'label/{}/annotations', 'label/{}/annotations_crop', ] def generate(root): assert len(image_dirs) == len(label_dirs)", "len(image_dirs) == len(label_dirs) for split in splits: for image_path, label_path", "] label_dirs = [ 'label/{}/annotations', 'label/{}/annotations_crop', ] def generate(root): assert", "== len(label_dirs) for split in splits: for image_path, label_path in", "to {os.path.join(root, output_file)}\\n') if __name__ == \"__main__\": parser = argparse.ArgumentParser()", "== \"__main__\": parser = argparse.ArgumentParser() parser.add_argument('root', type=str, help='path of dataset", "and image_path.endswith('_crop'): label_path = label_path.replace('_crop', '') if not os.path.exists(os.path.join(root, label_path)):" ]
[ "batch size 64 inputs = torch.randn((64, 3, 32, 32)) #", "# both modules accept the same arguments and perform the", "the same operation torch_module = torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride", "unfoldNd torch.manual_seed(0) # random batched RGB 32x32 image-shaped input tensor", "import torch import unfoldNd torch.manual_seed(0) # random batched RGB 32x32", "1 padding = 1 stride = 2 # both modules", "make this example deterministic import torch import unfoldNd torch.manual_seed(0) #", "comparison with ``torch.nn.Unfold``.\"\"\" # imports, make this example deterministic import", "to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``.\"\"\" # imports, make", "deterministic import torch import unfoldNd torch.manual_seed(0) # random batched RGB", "stride=stride ) lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride )", "3, 32, 32)) # module hyperparameters kernel_size = 3 dilation", "dilation=dilation, padding=padding, stride=stride ) lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding,", "32, 32)) # module hyperparameters kernel_size = 3 dilation =", "torch_module = torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride ) lib_module =", "# random batched RGB 32x32 image-shaped input tensor of batch", "<gh_stars>10-100 \"\"\"How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``.\"\"\" #", "random batched RGB 32x32 image-shaped input tensor of batch size", "= lib_module(inputs) # check if torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs of", "input tensor of batch size 64 inputs = torch.randn((64, 3,", "\"\"\"How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``.\"\"\" # imports,", "= unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride ) # forward pass", "arguments and perform the same operation torch_module = torch.nn.Unfold( kernel_size,", "image-shaped input tensor of batch size 64 inputs = torch.randn((64,", "lib_outputs = lib_module(inputs) # check if torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs", "same operation torch_module = torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride )", "batched RGB 32x32 image-shaped input tensor of batch size 64", "= 1 padding = 1 stride = 2 # both", "both modules accept the same arguments and perform the same", ") lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride ) #", "this example deterministic import torch import unfoldNd torch.manual_seed(0) # random", "64 inputs = torch.randn((64, 3, 32, 32)) # module hyperparameters", "print(\"✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.\") else: raise AssertionError(\"❌", "torch.manual_seed(0) # random batched RGB 32x32 image-shaped input tensor of", "and perform the same operation torch_module = torch.nn.Unfold( kernel_size, dilation=dilation,", "example deterministic import torch import unfoldNd torch.manual_seed(0) # random batched", "inputs = torch.randn((64, 3, 32, 32)) # module hyperparameters kernel_size", "hyperparameters kernel_size = 3 dilation = 1 padding = 1", "A comparison with ``torch.nn.Unfold``.\"\"\" # imports, make this example deterministic", "stride = 2 # both modules accept the same arguments", "32)) # module hyperparameters kernel_size = 3 dilation = 1", "same arguments and perform the same operation torch_module = torch.nn.Unfold(", "= torch.randn((64, 3, 32, 32)) # module hyperparameters 
kernel_size =", "kernel_size = 3 dilation = 1 padding = 1 stride", "with ``torch.nn.Unfold``.\"\"\" # imports, make this example deterministic import torch", "check if torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd", "if torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.\")", "the same arguments and perform the same operation torch_module =", "= 3 dilation = 1 padding = 1 stride =", "dilation=dilation, padding=padding, stride=stride ) # forward pass torch_outputs = torch_module(inputs)", "torch.nn.Unfold and unfoldNd.UnfoldNd match.\") else: raise AssertionError(\"❌ Outputs don't match\")", "padding=padding, stride=stride ) # forward pass torch_outputs = torch_module(inputs) lib_outputs", "kernel_size, dilation=dilation, padding=padding, stride=stride ) # forward pass torch_outputs =", "of batch size 64 inputs = torch.randn((64, 3, 32, 32))", "use ``unfoldNd``. A comparison with ``torch.nn.Unfold``.\"\"\" # imports, make this", "torch import unfoldNd torch.manual_seed(0) # random batched RGB 32x32 image-shaped", "accept the same arguments and perform the same operation torch_module", "1 stride = 2 # both modules accept the same", "lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride ) # forward", "pass torch_outputs = torch_module(inputs) lib_outputs = lib_module(inputs) # check if", "operation torch_module = torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride ) lib_module", "stride=stride ) # forward pass torch_outputs = torch_module(inputs) lib_outputs =", "# imports, make this example deterministic import torch import unfoldNd", "Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.\") else: raise AssertionError(\"❌ Outputs", "= torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride ) lib_module = unfoldNd.UnfoldNd(", "perform the same operation torch_module = torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding,", "# check if torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs of torch.nn.Unfold and", "torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.\") else:", "imports, make this example deterministic import torch import unfoldNd torch.manual_seed(0)", "torch_module(inputs) lib_outputs = lib_module(inputs) # check if torch.allclose(torch_outputs, lib_outputs): print(\"✔", "3 dilation = 1 padding = 1 stride = 2", "forward pass torch_outputs = torch_module(inputs) lib_outputs = lib_module(inputs) # check", ") # forward pass torch_outputs = torch_module(inputs) lib_outputs = lib_module(inputs)", "padding=padding, stride=stride ) lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride", "torch.randn((64, 3, 32, 32)) # module hyperparameters kernel_size = 3", "32x32 image-shaped input tensor of batch size 64 inputs =", "torch_outputs = torch_module(inputs) lib_outputs = lib_module(inputs) # check if torch.allclose(torch_outputs,", "module hyperparameters kernel_size = 3 dilation = 1 padding =", "``unfoldNd``. 
A comparison with ``torch.nn.Unfold``.\"\"\" # imports, make this example", "2 # both modules accept the same arguments and perform", "kernel_size, dilation=dilation, padding=padding, stride=stride ) lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation,", "tensor of batch size 64 inputs = torch.randn((64, 3, 32,", "``torch.nn.Unfold``.\"\"\" # imports, make this example deterministic import torch import", "# forward pass torch_outputs = torch_module(inputs) lib_outputs = lib_module(inputs) #", "size 64 inputs = torch.randn((64, 3, 32, 32)) # module", "lib_outputs): print(\"✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.\") else: raise", "= 2 # both modules accept the same arguments and", "# module hyperparameters kernel_size = 3 dilation = 1 padding", "= 1 stride = 2 # both modules accept the", "= torch_module(inputs) lib_outputs = lib_module(inputs) # check if torch.allclose(torch_outputs, lib_outputs):", "padding = 1 stride = 2 # both modules accept", "modules accept the same arguments and perform the same operation", "torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride ) lib_module = unfoldNd.UnfoldNd( kernel_size,", "lib_module(inputs) # check if torch.allclose(torch_outputs, lib_outputs): print(\"✔ Outputs of torch.nn.Unfold", "RGB 32x32 image-shaped input tensor of batch size 64 inputs", "of torch.nn.Unfold and unfoldNd.UnfoldNd match.\") else: raise AssertionError(\"❌ Outputs don't", "unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride ) # forward pass torch_outputs", "dilation = 1 padding = 1 stride = 2 #", "import unfoldNd torch.manual_seed(0) # random batched RGB 32x32 image-shaped input" ]
[ "characters for use in a application/json type script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes))", "keep_lazy from django.utils.safestring import SafeText, mark_safe _json_escapes = { ord('>'):", "ord('\"'): '&#34;', ord(\"'\"): '&#39;', ord(\"=\"): '&#61;', } @keep_lazy(str, SafeText) def", "{ ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', } _json_escapes_attr =", "{ ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', ord('\"'): '&#34;', ord(\"'\"):", "ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', ord('\"'): '&#34;', ord(\"'\"): '&#39;',", "'&#34;', ord(\"'\"): '&#39;', ord(\"=\"): '&#61;', } @keep_lazy(str, SafeText) def escapejson(value):", "force_str from django.utils.functional import keep_lazy from django.utils.safestring import SafeText, mark_safe", "from django.utils.functional import keep_lazy from django.utils.safestring import SafeText, mark_safe _json_escapes", "} _json_escapes_attr = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026',", "= { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', } _json_escapes_attr", "type script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def escapejson_attr(value): \"\"\"Hex encodes", "characters for use in a html attributw script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes_attr))", "_json_escapes_attr = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', ord('\"'):", "application/json type script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def escapejson_attr(value): \"\"\"Hex", "for use in a application/json type script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str,", "'\\\\u0026', ord('\"'): '&#34;', ord(\"'\"): '&#39;', ord(\"=\"): '&#61;', } @keep_lazy(str, SafeText)", "@keep_lazy(str, SafeText) def escapejson_attr(value): \"\"\"Hex encodes characters for use in", "SafeText) def escapejson_attr(value): \"\"\"Hex encodes characters for use in a", "= { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', ord('\"'): '&#34;',", "'\\\\u003C', ord('&'): '\\\\u0026', } _json_escapes_attr = { ord('>'): '\\\\u003E', ord('<'):", "import keep_lazy from django.utils.safestring import SafeText, mark_safe _json_escapes = {", "def escapejson_attr(value): \"\"\"Hex encodes characters for use in a html", "} @keep_lazy(str, SafeText) def escapejson(value): \"\"\"Hex encodes characters for use", "import force_str from django.utils.functional import keep_lazy from django.utils.safestring import SafeText,", "\"\"\"Hex encodes characters for use in a application/json type script.\"\"\"", "ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', } _json_escapes_attr = { ord('>'): '\\\\u003E',", "'&#39;', ord(\"=\"): '&#61;', } @keep_lazy(str, SafeText) def escapejson(value): \"\"\"Hex encodes", "from django.utils.safestring import SafeText, mark_safe _json_escapes = { ord('>'): '\\\\u003E',", "SafeText) def escapejson(value): \"\"\"Hex encodes characters for use in a", "ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', } _json_escapes_attr = {", "django.utils.encoding import force_str from django.utils.functional import keep_lazy from django.utils.safestring import", "ord(\"=\"): '&#61;', } @keep_lazy(str, SafeText) def escapejson(value): \"\"\"Hex encodes characters", "a application/json type script.\"\"\" return 
mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def escapejson_attr(value):", "'\\\\u003C', ord('&'): '\\\\u0026', ord('\"'): '&#34;', ord(\"'\"): '&#39;', ord(\"=\"): '&#61;', }", "encodes characters for use in a application/json type script.\"\"\" return", "'\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', } _json_escapes_attr = { ord('>'):", "escapejson_attr(value): \"\"\"Hex encodes characters for use in a html attributw", "'\\\\u0026', } _json_escapes_attr = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'):", "django.utils.functional import keep_lazy from django.utils.safestring import SafeText, mark_safe _json_escapes =", "use in a application/json type script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText)", "ord('&'): '\\\\u0026', ord('\"'): '&#34;', ord(\"'\"): '&#39;', ord(\"=\"): '&#61;', } @keep_lazy(str,", "mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def escapejson_attr(value): \"\"\"Hex encodes characters for use", "django.utils.safestring import SafeText, mark_safe _json_escapes = { ord('>'): '\\\\u003E', ord('<'):", "from django.utils.encoding import force_str from django.utils.functional import keep_lazy from django.utils.safestring", "@keep_lazy(str, SafeText) def escapejson(value): \"\"\"Hex encodes characters for use in", "in a application/json type script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def", "ord('&'): '\\\\u0026', } _json_escapes_attr = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C',", "SafeText, mark_safe _json_escapes = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'):", "'\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', ord('\"'): '&#34;', ord(\"'\"): '&#39;', ord(\"=\"):", "ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', ord('\"'): '&#34;', ord(\"'\"): '&#39;', ord(\"=\"): '&#61;',", "def escapejson(value): \"\"\"Hex encodes characters for use in a application/json", "script.\"\"\" return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def escapejson_attr(value): \"\"\"Hex encodes characters", "\"\"\"Hex encodes characters for use in a html attributw script.\"\"\"", "mark_safe _json_escapes = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026',", "ord(\"'\"): '&#39;', ord(\"=\"): '&#61;', } @keep_lazy(str, SafeText) def escapejson(value): \"\"\"Hex", "escapejson(value): \"\"\"Hex encodes characters for use in a application/json type", "encodes characters for use in a html attributw script.\"\"\" return", "return mark_safe(force_str(value).translate(_json_escapes)) @keep_lazy(str, SafeText) def escapejson_attr(value): \"\"\"Hex encodes characters for", "'&#61;', } @keep_lazy(str, SafeText) def escapejson(value): \"\"\"Hex encodes characters for", "_json_escapes = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C', ord('&'): '\\\\u0026', }", "import SafeText, mark_safe _json_escapes = { ord('>'): '\\\\u003E', ord('<'): '\\\\u003C'," ]
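# Illustrative registration sketch (assumption: this module lives in an app's
# templatetags/ package; the `register` object and the filter names below are
# not part of the original file).
from django import template

register = template.Library()
register.filter("escapejson", escapejson)
register.filter("escapejson_attr", escapejson_attr)

# A template could then embed JSON safely, e.g.:
#   <script type="application/json">{{ payload_json|escapejson }}</script>
#   <div data-payload="{{ payload_json|escapejson_attr }}"></div>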
[ "the plotly.js api To keep this general, this chart does", "df.columns) and (y in df.columns): _data.append( { \"x\": df[x].values.tolist(), \"y\":", "jsonify, request _BASE_CONFIG = { \"showLink\": False, \"displaylogo\": False, \"modeBarButtonsToRemove\":", "{ \"showLink\": False, \"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class PlotlyAPI(Chart):", "if df.empty: return { \"x\": [], \"y\": [], \"mode\": mode", "import Chart from flask import jsonify, request _BASE_CONFIG = {", "def __init__(self, chart_id, url, route_func, init_params={}): options = { \"chartid\":", "df[y].values.tolist(), \"mode\": mode } ) return { \"data\": _data, \"layout\":", "mode (e.g. lines) layout (dict): layout parameters config (dict): config", "if (x in df.columns) and (y in df.columns): _data.append( {", "class for Plotly.js API This class is used to create", "\"\"\" basic line plot dataframe to json for a line", "tuples containing column names mode (str): plotly.js mode (e.g. lines)", "route_func method. \"\"\" def __init__(self, chart_id, url, route_func, init_params={}): options", "config=_BASE_CONFIG): \"\"\" basic line plot dataframe to json for a", "df.empty: return { \"x\": [], \"y\": [], \"mode\": mode }", "\"mode\": mode } _data = [] for x, y in", "\"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class PlotlyAPI(Chart): \"\"\" Base class", "\"params\": init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod def line_plot(df,", "lines) layout (dict): layout parameters config (dict): config parameters \"\"\"", "of transmitting data. Instead the user must supply a route_func", "for a line plot Args: df (pandas.DataFrame): input dataframe xypairs", "list of tuples containing column names mode (str): plotly.js mode", "\"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class PlotlyAPI(Chart): \"\"\" Base class for Plotly.js", "in df.columns) and (y in df.columns): _data.append( { \"x\": df[x].values.tolist(),", "keep this general, this chart does not have a default", "\"\"\" Base class for Plotly.js API This class is used", "\"showLink\": False, \"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class PlotlyAPI(Chart): \"\"\"", "request _BASE_CONFIG = { \"showLink\": False, \"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"]", "..charts import Chart from flask import jsonify, request _BASE_CONFIG =", "transmitting data. Instead the user must supply a route_func method.", "this general, this chart does not have a default method", "plotly.js api To keep this general, this chart does not", "return { \"x\": [], \"y\": [], \"mode\": mode } _data", "xypairs, mode, layout={}, config=_BASE_CONFIG): \"\"\" basic line plot dataframe to", "{ \"chartid\": chart_id, \"url\": url, \"params\": init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\",", "containing column names mode (str): plotly.js mode (e.g. lines) layout", "\"y\": [], \"mode\": mode } _data = [] for x,", "(x in df.columns) and (y in df.columns): _data.append( { \"x\":", "method of transmitting data. Instead the user must supply a", "(str): plotly.js mode (e.g. lines) layout (dict): layout parameters config", "json for a line plot Args: df (pandas.DataFrame): input dataframe", "= { \"chartid\": chart_id, \"url\": url, \"params\": init_params } super(PlotlyAPI,", "a route_func method. 
\"\"\" def __init__(self, chart_id, url, route_func, init_params={}):", "(pandas.DataFrame): input dataframe xypairs (list): list of tuples containing column", "options = { \"chartid\": chart_id, \"url\": url, \"params\": init_params }", "} super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod def line_plot(df, xypairs, mode,", "@staticmethod def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG): \"\"\" basic line", "chart_id, \"url\": url, \"params\": init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func)", "(dict): layout parameters config (dict): config parameters \"\"\" if df.empty:", "[] for x, y in xypairs: if (x in df.columns)", "supply a route_func method. \"\"\" def __init__(self, chart_id, url, route_func,", "from ..charts import Chart from flask import jsonify, request _BASE_CONFIG", "general, this chart does not have a default method of", "init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod def line_plot(df, xypairs,", "} _data = [] for x, y in xypairs: if", "in xypairs: if (x in df.columns) and (y in df.columns):", "_BASE_CONFIG = { \"showLink\": False, \"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] }", "a line plot Args: df (pandas.DataFrame): input dataframe xypairs (list):", "False, \"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class PlotlyAPI(Chart): \"\"\" Base", "\"mode\": mode } ) return { \"data\": _data, \"layout\": layout,", "mode } _data = [] for x, y in xypairs:", "super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod def line_plot(df, xypairs, mode, layout={},", "df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\": mode } ) return { \"data\":", "layout={}, config=_BASE_CONFIG): \"\"\" basic line plot dataframe to json for", "user must supply a route_func method. \"\"\" def __init__(self, chart_id,", "dataframe to json for a line plot Args: df (pandas.DataFrame):", "config parameters \"\"\" if df.empty: return { \"x\": [], \"y\":", "have a default method of transmitting data. Instead the user", "used to create charts using the plotly.js api To keep", ") return { \"data\": _data, \"layout\": layout, \"config\": config }", "charts using the plotly.js api To keep this general, this", "a default method of transmitting data. Instead the user must", "and (y in df.columns): _data.append( { \"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(),", "the user must supply a route_func method. 
\"\"\" def __init__(self,", "xypairs (list): list of tuples containing column names mode (str):", "create charts using the plotly.js api To keep this general,", "Plotly.js API This class is used to create charts using", "= [] for x, y in xypairs: if (x in", "df (pandas.DataFrame): input dataframe xypairs (list): list of tuples containing", "mode } ) return { \"data\": _data, \"layout\": layout, \"config\":", "line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG): \"\"\" basic line plot dataframe", "def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG): \"\"\" basic line plot", "parameters config (dict): config parameters \"\"\" if df.empty: return {", "\"\"\" def __init__(self, chart_id, url, route_func, init_params={}): options = {", "False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class PlotlyAPI(Chart): \"\"\" Base class for", "config (dict): config parameters \"\"\" if df.empty: return { \"x\":", "url, \"params\": init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod def", "mode, layout={}, config=_BASE_CONFIG): \"\"\" basic line plot dataframe to json", "route_func) @staticmethod def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG): \"\"\" basic", "for x, y in xypairs: if (x in df.columns) and", "\"x\": [], \"y\": [], \"mode\": mode } _data = []", "plot dataframe to json for a line plot Args: df", "[], \"y\": [], \"mode\": mode } _data = [] for", "chart_id, url, route_func, init_params={}): options = { \"chartid\": chart_id, \"url\":", "self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):", "(dict): config parameters \"\"\" if df.empty: return { \"x\": [],", "import jsonify, request _BASE_CONFIG = { \"showLink\": False, \"displaylogo\": False,", "must supply a route_func method. \"\"\" def __init__(self, chart_id, url,", "__init__(self, chart_id, url, route_func, init_params={}): options = { \"chartid\": chart_id,", "to create charts using the plotly.js api To keep this", "} class PlotlyAPI(Chart): \"\"\" Base class for Plotly.js API This", "layout parameters config (dict): config parameters \"\"\" if df.empty: return", "for Plotly.js API This class is used to create charts", "Instead the user must supply a route_func method. \"\"\" def", "basic line plot dataframe to json for a line plot", "} ) return { \"data\": _data, \"layout\": layout, \"config\": config", "layout (dict): layout parameters config (dict): config parameters \"\"\" if", "\"\"\" if df.empty: return { \"x\": [], \"y\": [], \"mode\":", "Base class for Plotly.js API This class is used to", "line plot dataframe to json for a line plot Args:", "is used to create charts using the plotly.js api To", "parameters \"\"\" if df.empty: return { \"x\": [], \"y\": [],", "method. \"\"\" def __init__(self, chart_id, url, route_func, init_params={}): options =", "x, y in xypairs: if (x in df.columns) and (y", "using the plotly.js api To keep this general, this chart", "url, route_func, init_params={}): options = { \"chartid\": chart_id, \"url\": url,", "\"chartid\": chart_id, \"url\": url, \"params\": init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options,", "To keep this general, this chart does not have a", "default method of transmitting data. 
Instead the user must supply", "[], \"mode\": mode } _data = [] for x, y", "options, route_func) @staticmethod def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG): \"\"\"", "to json for a line plot Args: df (pandas.DataFrame): input", "data. Instead the user must supply a route_func method. \"\"\"", "line plot Args: df (pandas.DataFrame): input dataframe xypairs (list): list", "{ \"x\": [], \"y\": [], \"mode\": mode } _data =", "plot Args: df (pandas.DataFrame): input dataframe xypairs (list): list of", "not have a default method of transmitting data. Instead the", "does not have a default method of transmitting data. Instead", "(y in df.columns): _data.append( { \"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\":", "\"url\": url, \"params\": init_params } super(PlotlyAPI, self).__init__(\"PlotlyAPI\", options, route_func) @staticmethod", "xypairs: if (x in df.columns) and (y in df.columns): _data.append(", "= { \"showLink\": False, \"displaylogo\": False, \"modeBarButtonsToRemove\": [\"sendDataToCloud\"] } class", "input dataframe xypairs (list): list of tuples containing column names", "\"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\": mode } ) return {", "Args: df (pandas.DataFrame): input dataframe xypairs (list): list of tuples", "df.columns): _data.append( { \"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\": mode }", "init_params={}): options = { \"chartid\": chart_id, \"url\": url, \"params\": init_params", "PlotlyAPI(Chart): \"\"\" Base class for Plotly.js API This class is", "Chart from flask import jsonify, request _BASE_CONFIG = { \"showLink\":", "This class is used to create charts using the plotly.js", "mode (str): plotly.js mode (e.g. lines) layout (dict): layout parameters", "route_func, init_params={}): options = { \"chartid\": chart_id, \"url\": url, \"params\":", "plotly.js mode (e.g. lines) layout (dict): layout parameters config (dict):", "column names mode (str): plotly.js mode (e.g. lines) layout (dict):", "(e.g. lines) layout (dict): layout parameters config (dict): config parameters", "in df.columns): _data.append( { \"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\": mode", "this chart does not have a default method of transmitting", "chart does not have a default method of transmitting data.", "_data = [] for x, y in xypairs: if (x", "names mode (str): plotly.js mode (e.g. lines) layout (dict): layout", "y in xypairs: if (x in df.columns) and (y in", "of tuples containing column names mode (str): plotly.js mode (e.g.", "_data.append( { \"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\": mode } )", "dataframe xypairs (list): list of tuples containing column names mode", "\"y\": df[y].values.tolist(), \"mode\": mode } ) return { \"data\": _data,", "[\"sendDataToCloud\"] } class PlotlyAPI(Chart): \"\"\" Base class for Plotly.js API", "(list): list of tuples containing column names mode (str): plotly.js", "API This class is used to create charts using the", "class is used to create charts using the plotly.js api", "{ \"x\": df[x].values.tolist(), \"y\": df[y].values.tolist(), \"mode\": mode } ) return", "flask import jsonify, request _BASE_CONFIG = { \"showLink\": False, \"displaylogo\":", "class PlotlyAPI(Chart): \"\"\" Base class for Plotly.js API This class", "from flask import jsonify, request _BASE_CONFIG = { \"showLink\": False,", "api To keep this general, this chart does not have" ]
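# Illustrative usage sketch for PlotlyAPI.line_plot (assumptions: pandas is
# available and the column names "t"/"value" are made up; the returned dict is
# what a route function would typically jsonify back to the browser).
import pandas as pd

_df = pd.DataFrame({"t": [0, 1, 2, 3], "value": [1.0, 2.5, 2.0, 3.5]})
_payload = PlotlyAPI.line_plot(_df, xypairs=[("t", "value")], mode="lines")
# _payload["data"] holds one trace per (x, y) pair; _payload["layout"] and
# _payload["config"] can be handed to Plotly.newPlot on the client side.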
[ "class Example(QtGui.QWidget): def __init__(self): super(Example, self).__init__() self.initUI() def initUI(self): qbtn", "QtCore class Example(QtGui.QWidget): def __init__(self): super(Example, self).__init__() self.initUI() def initUI(self):", "def initUI(self): qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300,", "Button') self.show() def main(): app = QtGui.QApplication(sys.argv) ex = Example()", "def __init__(self): super(Example, self).__init__() self.initUI() def initUI(self): qbtn = QtGui.QPushButton('Quit',", "self.initUI() def initUI(self): qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300,", "import QtCore class Example(QtGui.QWidget): def __init__(self): super(Example, self).__init__() self.initUI() def", "250, 150) self.setWindowTitle('Quit Button') self.show() def main(): app = QtGui.QApplication(sys.argv)", "QtGui from PyQt4 import QtCore class Example(QtGui.QWidget): def __init__(self): super(Example,", "#!/usr/bin/python import sys from PyQt4 import QtGui from PyQt4 import", "Example(QtGui.QWidget): def __init__(self): super(Example, self).__init__() self.initUI() def initUI(self): qbtn =", "= QtGui.QApplication(sys.argv) ex = Example() sys.exit(app.exec_()) if __name__ == \"__main__\":", "PyQt4 import QtGui from PyQt4 import QtCore class Example(QtGui.QWidget): def", "__init__(self): super(Example, self).__init__() self.initUI() def initUI(self): qbtn = QtGui.QPushButton('Quit', self)", "QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250, 150) self.setWindowTitle('Quit Button')", "sys from PyQt4 import QtGui from PyQt4 import QtCore class", "qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250, 150) self.setWindowTitle('Quit Button') self.show() def", "app = QtGui.QApplication(sys.argv) ex = Example() sys.exit(app.exec_()) if __name__ ==", "main(): app = QtGui.QApplication(sys.argv) ex = Example() sys.exit(app.exec_()) if __name__", "super(Example, self).__init__() self.initUI() def initUI(self): qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)", "self.setGeometry(300, 300, 250, 150) self.setWindowTitle('Quit Button') self.show() def main(): app", "import QtGui from PyQt4 import QtCore class Example(QtGui.QWidget): def __init__(self):", "PyQt4 import QtCore class Example(QtGui.QWidget): def __init__(self): super(Example, self).__init__() self.initUI()", "qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250, 150)", "self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250, 150) self.setWindowTitle('Quit Button') self.show()", "self).__init__() self.initUI() def initUI(self): qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint())", "from PyQt4 import QtCore class Example(QtGui.QWidget): def __init__(self): super(Example, self).__init__()", "150) self.setWindowTitle('Quit Button') self.show() def main(): app = QtGui.QApplication(sys.argv) ex", "from PyQt4 import QtGui from PyQt4 import QtCore class 
Example(QtGui.QWidget):", "= QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250, 150) self.setWindowTitle('Quit", "300, 250, 150) self.setWindowTitle('Quit Button') self.show() def main(): app =", "self.show() def main(): app = QtGui.QApplication(sys.argv) ex = Example() sys.exit(app.exec_())", "QtGui.QApplication(sys.argv) ex = Example() sys.exit(app.exec_()) if __name__ == \"__main__\": main()", "self.setWindowTitle('Quit Button') self.show() def main(): app = QtGui.QApplication(sys.argv) ex =", "qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250, 150) self.setWindowTitle('Quit Button') self.show() def main():", "initUI(self): qbtn = QtGui.QPushButton('Quit', self) qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit) qbtn.resize(qbtn.sizeHint()) self.setGeometry(300, 300, 250,", "def main(): app = QtGui.QApplication(sys.argv) ex = Example() sys.exit(app.exec_()) if", "import sys from PyQt4 import QtGui from PyQt4 import QtCore", "<filename>pyqt/getting_started/close_window.py #!/usr/bin/python import sys from PyQt4 import QtGui from PyQt4" ]
[ "self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a = torch.Tensor([[[1, 2], [1, 2], [2,", "= torch.Tensor([[1, 2], [2, 4]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,))", "self.mean = ZeroMean() def test_forward(self): a = torch.Tensor([[1, 2], [2,", "2], [1, 2], [2, 4]], [[2, 3], [2, 3], [1,", "self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a = torch.Tensor([[[1, 2],", "3], [2, 3], [1, 3]]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,", "unittest from gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self): self.mean", "from __future__ import absolute_import from __future__ import division from __future__", "[2, 3], [1, 3]]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2, 3))", "from gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self): self.mean =", "torch.Tensor([[1, 2], [2, 4]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all())", "torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2, 3], [2, 3],", "__future__ import unicode_literals import torch import unittest from gpytorch.means import", "from __future__ import division from __future__ import print_function from __future__", "class TestZeroMean(unittest.TestCase): def setUp(self): self.mean = ZeroMean() def test_forward(self): a", "import ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self): self.mean = ZeroMean() def", "def test_forward(self): a = torch.Tensor([[1, 2], [2, 4]]) res =", "self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a = torch.Tensor([[[1, 2], [1,", "absolute_import from __future__ import division from __future__ import print_function from", "res = self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a =", "__future__ import division from __future__ import print_function from __future__ import", "import unittest from gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self):", "def setUp(self): self.mean = ZeroMean() def test_forward(self): a = torch.Tensor([[1,", "4]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a", "print_function from __future__ import unicode_literals import torch import unittest from", "ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self): self.mean = ZeroMean() def test_forward(self):", "ZeroMean() def test_forward(self): a = torch.Tensor([[1, 2], [2, 4]]) res", "[2, 4]], [[2, 3], [2, 3], [1, 3]]]) res =", "= ZeroMean() def test_forward(self): a = torch.Tensor([[1, 2], [2, 4]])", "def test_forward_batch(self): a = torch.Tensor([[[1, 2], [1, 2], [2, 4]],", "torch import unittest from gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase): def", "2], [2, 4]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def", "test_forward_batch(self): a = torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2,", "2], [2, 4]], [[2, 3], [2, 3], [1, 3]]]) res", "test_forward(self): a = torch.Tensor([[1, 2], [2, 4]]) res = self.mean(a)", "from __future__ import print_function from __future__ import unicode_literals import torch", "unicode_literals import torch import unittest from gpytorch.means import ZeroMean class", "TestZeroMean(unittest.TestCase): def setUp(self): self.mean = ZeroMean() 
def test_forward(self): a =", "[2, 4]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self):", "setUp(self): self.mean = ZeroMean() def test_forward(self): a = torch.Tensor([[1, 2],", "4]], [[2, 3], [2, 3], [1, 3]]]) res = self.mean(a)", "from __future__ import unicode_literals import torch import unittest from gpytorch.means", "division from __future__ import print_function from __future__ import unicode_literals import", "import print_function from __future__ import unicode_literals import torch import unittest", "= torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2, 3], [2,", "a = torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2, 3],", "import division from __future__ import print_function from __future__ import unicode_literals", "= self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a = torch.Tensor([[[1,", "__future__ import print_function from __future__ import unicode_literals import torch import", "import torch import unittest from gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase):", "gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self): self.mean = ZeroMean()", "import absolute_import from __future__ import division from __future__ import print_function", "<gh_stars>0 from __future__ import absolute_import from __future__ import division from", "__future__ import absolute_import from __future__ import division from __future__ import", "[[2, 3], [2, 3], [1, 3]]]) res = self.mean(a) self.assertEqual(tuple(res.size()),", "a = torch.Tensor([[1, 2], [2, 4]]) res = self.mean(a) self.assertEqual(tuple(res.size()),", "(2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a = torch.Tensor([[[1, 2], [1, 2],", "[1, 2], [2, 4]], [[2, 3], [2, 3], [1, 3]]])", "3], [1, 3]]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2, 3)) self.assertTrue(res.eq(0).all())", "import unicode_literals import torch import unittest from gpytorch.means import ZeroMean" ]
[ "== \"-f\": f = a def random_string(prefix, maxlen): symbols =", "nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10), address=random_string('address', 10), home_tel=random_string('home_tel', 10),", "symbols = string.ascii_letters + string.digits + \" \"*10 return prefix", "try: opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\", \"file\"])", "from model.contact import Contact import os.path import getopt import sys", "if o == \"-n\": n = int(a) elif o ==", "return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))]) testdata =", "string.digits + \" \"*10 return prefix + \"\".join([random.choice(symbols) for i", "secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10)) for i in range(5)", "for o, a in opts: if o == \"-n\": n", "i in range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\",", "range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\", company=\"\", address=\"\",", "for i in range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\",", "title=\"\", company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\", homepage=\"\", birthday=\"\",", "of contacts\", \"file\"]) except getopt.GetoptError as err: getopt.usage() sys.exit(2) n", "n = 5 f = \"data/contacts.json\" for o, a in", "home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\", secondary_address=\"\", secondary_tel=\"\",", "random_string(prefix, maxlen): symbols = string.ascii_letters + string.digits + \" \"*10", "Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10), nickname=random_string('nickname', 10), title=random_string('random_string', 10),", "10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes',", "= int(a) elif o == \"-f\": f = a def", "def random_string(prefix, maxlen): symbols = string.ascii_letters + string.digits + \"", "import getopt import sys try: opts, args = getopt.getopt(sys.argv[1:], \"n:f:\",", "secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10)) for i in range(5) ] file", "except getopt.GetoptError as err: getopt.usage() sys.exit(2) n = 5 f", "10), last_name=random_string('last_name', 10), nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10), address=random_string('address',", "mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10), homepage=random_string('homepage', 10),", "opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\", \"file\"]) except", "10), work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10), homepage=random_string('homepage', 10), birthday=random_string('birthday',", "home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10), 
work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10),", "5 f = \"data/contacts.json\" for o, a in opts: if", "sys.exit(2) n = 5 f = \"data/contacts.json\" for o, a", "args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\", \"file\"]) except getopt.GetoptError", "birthday=\"\", anniversary=\"\", secondary_address=\"\", secondary_tel=\"\", notes=\"\")] + [ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name',", "as err: getopt.usage() sys.exit(2) n = 5 f = \"data/contacts.json\"", "Contact import os.path import getopt import sys try: opts, args", "+ [ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10), nickname=random_string('nickname', 10),", "f = a def random_string(prefix, maxlen): symbols = string.ascii_letters +", "for i in range(5) ] file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f)", "address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\", secondary_address=\"\",", "work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10), homepage=random_string('homepage', 10), birthday=random_string('birthday', 10),", "] file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with open(file , \"w\")", "opts: if o == \"-n\": n = int(a) elif o", "notes=random_string('notes', 10)) for i in range(5) ] file = os.path.join(os.path.dirname(os.path.abspath(__file__)),", "10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email',", "notes=\"\")] + [ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10), nickname=random_string('nickname',", "10), mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10), homepage=random_string('homepage',", "= string.ascii_letters + string.digits + \" \"*10 return prefix +", "[ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10), nickname=random_string('nickname', 10), title=random_string('random_string',", "getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\", \"file\"]) except getopt.GetoptError as err:", "f) with open(file , \"w\") as out: jsonpickle.set_encoder_options(\"json\", indent=2) out.write(jsonpickle.encode(testdata))", "fax=\"\", email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\", secondary_address=\"\", secondary_tel=\"\", notes=\"\")] + [", "import random import string from model.contact import Contact import os.path", "random import string from model.contact import Contact import os.path import", "mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\", secondary_address=\"\", secondary_tel=\"\", notes=\"\")]", "i in range(5) ] file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with", "fax=random_string('fax', 10), email=random_string('email', 10), homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10),", "os.path import getopt import sys try: opts, args = 
getopt.getopt(sys.argv[1:],", "in range(5) ] file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with open(file", "nickname=\"\", title=\"\", company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\", homepage=\"\",", "= [Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\", company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\",", "last_name=random_string('last_name', 10), nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10), address=random_string('address', 10),", "\"file\"]) except getopt.GetoptError as err: getopt.usage() sys.exit(2) n = 5", "10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10)) for", "a def random_string(prefix, maxlen): symbols = string.ascii_letters + string.digits +", "+ \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\", middle_name=\"\",", "company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\",", "anniversary=\"\", secondary_address=\"\", secondary_tel=\"\", notes=\"\")] + [ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10),", "10), homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel',", "range(5) ] file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with open(file ,", "\" \"*10 return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))])", "sys try: opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\",", "\"n:f:\", [\"number of contacts\", \"file\"]) except getopt.GetoptError as err: getopt.usage()", "\"-f\": f = a def random_string(prefix, maxlen): symbols = string.ascii_letters", "err: getopt.usage() sys.exit(2) n = 5 f = \"data/contacts.json\" for", "company=random_string('company', 10), address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10),", "elif o == \"-f\": f = a def random_string(prefix, maxlen):", "= a def random_string(prefix, maxlen): symbols = string.ascii_letters + string.digits", "homepage=\"\", birthday=\"\", anniversary=\"\", secondary_address=\"\", secondary_tel=\"\", notes=\"\")] + [ Contact(first_name=random_string('first_name', 10),", "import jsonpickle import random import string from model.contact import Contact", "title=random_string('random_string', 10), company=random_string('company', 10), address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10),", "10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10)) for i in range(5) ]", "homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10),", "== \"-n\": n = int(a) elif o == \"-f\": f", "work_tel=\"\", fax=\"\", email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\", 
secondary_address=\"\", secondary_tel=\"\", notes=\"\")] +", "10), title=random_string('random_string', 10), company=random_string('company', 10), address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel',", "maxlen): symbols = string.ascii_letters + string.digits + \" \"*10 return", "anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10)) for i", "email=random_string('email', 10), homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10),", "\"..\", f) with open(file , \"w\") as out: jsonpickle.set_encoder_options(\"json\", indent=2)", "10), email=random_string('email', 10), homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address',", "import Contact import os.path import getopt import sys try: opts,", "= \"data/contacts.json\" for o, a in opts: if o ==", "10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10), nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company',", "address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10), fax=random_string('fax', 10),", "\"data/contacts.json\" for o, a in opts: if o == \"-n\":", "middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10), nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10),", "in range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\", company=\"\",", "[Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\", company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\",", "getopt.GetoptError as err: getopt.usage() sys.exit(2) n = 5 f =", "import os.path import getopt import sys try: opts, args =", "+ string.digits + \" \"*10 return prefix + \"\".join([random.choice(symbols) for", "secondary_address=\"\", secondary_tel=\"\", notes=\"\")] + [ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name',", "in opts: if o == \"-n\": n = int(a) elif", "= getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of contacts\", \"file\"]) except getopt.GetoptError as", "model.contact import Contact import os.path import getopt import sys try:", "<filename>generator/contact.py import jsonpickle import random import string from model.contact import", "10), fax=random_string('fax', 10), email=random_string('email', 10), homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary',", "\"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\", middle_name=\"\", last_name=\"\",", "string from model.contact import Contact import os.path import getopt import", "int(a) elif o == \"-f\": f = a def random_string(prefix,", "prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))]) testdata = [Contact(first_name=\"\",", 
"os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with open(file , \"w\") as out: jsonpickle.set_encoder_options(\"json\",", "contacts\", \"file\"]) except getopt.GetoptError as err: getopt.usage() sys.exit(2) n =", "10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10)) for i in", "getopt.usage() sys.exit(2) n = 5 f = \"data/contacts.json\" for o,", "= 5 f = \"data/contacts.json\" for o, a in opts:", "f = \"data/contacts.json\" for o, a in opts: if o", "birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10))", "10), address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10), fax=random_string('fax',", "import sys try: opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number of", "file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with open(file , \"w\") as", "o, a in opts: if o == \"-n\": n =", "o == \"-n\": n = int(a) elif o == \"-f\":", "getopt import sys try: opts, args = getopt.getopt(sys.argv[1:], \"n:f:\", [\"number", "\"-n\": n = int(a) elif o == \"-f\": f =", "secondary_tel=\"\", notes=\"\")] + [ Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10),", "string.ascii_letters + string.digits + \" \"*10 return prefix + \"\".join([random.choice(symbols)", "middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\", company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\",", "n = int(a) elif o == \"-f\": f = a", "jsonpickle import random import string from model.contact import Contact import", "[\"number of contacts\", \"file\"]) except getopt.GetoptError as err: getopt.usage() sys.exit(2)", "o == \"-f\": f = a def random_string(prefix, maxlen): symbols", "+ \" \"*10 return prefix + \"\".join([random.choice(symbols) for i in", "10), nickname=random_string('nickname', 10), title=random_string('random_string', 10), company=random_string('company', 10), address=random_string('address', 10), home_tel=random_string('home_tel',", "10), company=random_string('company', 10), address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel',", "10)) for i in range(5) ] file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\",", "a in opts: if o == \"-n\": n = int(a)", "= os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", f) with open(file , \"w\") as out:", "import string from model.contact import Contact import os.path import getopt", "last_name=\"\", nickname=\"\", title=\"\", company=\"\", address=\"\", home_tel=\"\", mobile_tel=\"\", work_tel=\"\", fax=\"\", email=\"\",", "testdata = [Contact(first_name=\"\", middle_name=\"\", last_name=\"\", nickname=\"\", title=\"\", company=\"\", address=\"\", home_tel=\"\",", "email=\"\", homepage=\"\", birthday=\"\", anniversary=\"\", secondary_address=\"\", secondary_tel=\"\", notes=\"\")] + [ Contact(first_name=random_string('first_name',", "\"*10 return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))]) testdata", "10), notes=random_string('notes', 10)) for i in range(5) ] file =" ]
# Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module

# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path

# Set up the test code and expected results

class RunModuleCodeTest(unittest.TestCase):

    expected_result = ["Top level assignment", "Lower level reference"]
    test_source = (
        "# Check basic code execution\n"
        "result = ['Top level assignment']\n"
        "def f():\n"
        "    result.append('Lower level reference')\n"
        "f()\n"
        "# Check the sys module\n"
        "import sys\n"
        "run_argv0 = sys.argv[0]\n"
        "run_name_in_sys_modules = __name__ in sys.modules\n"
        "if run_name_in_sys_modules:\n"
        "    module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
        "# Check nested operation\n"
        "import runpy\n"
        "nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
    )

    def test_run_code(self):
        saved_argv0 = sys.argv[0]
        d = _run_code(self.test_source, {})
        self.failUnless(d["result"] == self.expected_result)
        self.failUnless(d["__name__"] is None)
        self.failUnless(d["__file__"] is None)
        self.failUnless(d["__loader__"] is None)
        self.failUnless(d["__package__"] is None)
        self.failUnless(d["run_argv0"] is saved_argv0)
        self.failUnless("run_name" not in d)
        self.failUnless(sys.argv[0] is saved_argv0)

    def test_run_module_code(self):
        initial = object()
        name = "<Nonsense>"
        file = "Some other nonsense"
        loader = "Now you're just being silly"
        package = ''  # Treat as a top level module
        d1 = dict(initial=initial)
        saved_argv0 = sys.argv[0]
        d2 = _run_module_code(self.test_source,
                              d1,
                              name,
                              file,
                              loader,
                              package)
        self.failUnless("result" not in d1)
        self.failUnless(d2["initial"] is initial)
        self.failUnless(d2["result"] == self.expected_result)
        self.failUnless(d2["nested"]["x"] == 1)
        self.failUnless(d2["__name__"] is name)
        self.failUnless(d2["run_name_in_sys_modules"])
        self.failUnless(d2["module_in_sys_modules"])
        self.failUnless(d2["__file__"] is file)
        self.failUnless(d2["run_argv0"] is file)
        self.failUnless(d2["__loader__"] is loader)
        self.failUnless(d2["__package__"] is package)
        self.failUnless(sys.argv[0] is saved_argv0)
        self.failUnless(name not in sys.modules)


class RunModuleTest(unittest.TestCase):

    def expect_import_error(self, mod_name):
        try:
            run_module(mod_name)
        except ImportError:
            pass
        else:
            self.fail("Expected import error for " + mod_name)

    def test_invalid_names(self):
        # Builtin module
        self.expect_import_error("sys")
        # Non-existent modules
        self.expect_import_error("sys.imp.eric")
        self.expect_import_error("os.path.half")
        self.expect_import_error("a.bee")
        self.expect_import_error(".howard")
        self.expect_import_error("..eaten")
        # Package
        self.expect_import_error("logging")

    def test_library_module(self):
        run_module("runpy")

    def _add_pkg_dir(self, pkg_dir):
        os.mkdir(pkg_dir)
        pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
        pkg_file = open(pkg_fname, "w")
        pkg_file.close()
        return pkg_fname

    def _make_pkg(self, source, depth):
        pkg_name = "__runpy_pkg__"
        test_fname = "runpy_test"+os.extsep+"py"
        pkg_dir = sub_dir = tempfile.mkdtemp()
        if verbose: print "  Package tree in:", sub_dir
        sys.path.insert(0, pkg_dir)
        if verbose: print "  Updated sys.path:", sys.path[0]
        for i in range(depth):
            sub_dir = os.path.join(sub_dir, pkg_name)
            pkg_fname = self._add_pkg_dir(sub_dir)
            if verbose: print "  Next level in:", sub_dir
            if verbose: print "  Created:", pkg_fname
        mod_fname = os.path.join(sub_dir, test_fname)
        mod_file = open(mod_fname, "w")
        mod_file.write(source)
        mod_file.close()
        if verbose: print "  Created:", mod_fname
        mod_name = (pkg_name+".")*depth + "runpy_test"
        return pkg_dir, mod_fname, mod_name

    def _del_pkg(self, top, depth, mod_name):
        for entry in list(sys.modules):
            if entry.startswith("__runpy_pkg__"):
                del sys.modules[entry]
        if verbose: print "  Removed sys.modules entries"
        del sys.path[0]
        if verbose: print "  Removed sys.path entry"
        for root, dirs, files in os.walk(top, topdown=False):
            for name in files:
                try:
                    os.remove(os.path.join(root, name))
                except OSError, ex:
                    if verbose: print ex # Persist with cleaning up
            for name in dirs:
                fullname = os.path.join(root, name)
                try:
                    os.rmdir(fullname)
                except OSError, ex:
                    if verbose: print ex # Persist with cleaning up
        try:
            os.rmdir(top)
            if verbose: print "  Removed package tree"
        except OSError, ex:
            if verbose: print ex # Persist with cleaning up

    def _check_module(self, depth):
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg("x=1\n", depth))
        forget(mod_name)
        try:
            if verbose: print "Running from source:", mod_name
            d1 = run_module(mod_name) # Read from source
            self.failUnless("x" in d1)
            self.failUnless(d1["x"] == 1)
            del d1 # Ensure __loader__ entry doesn't keep file open
            __import__(mod_name)
            os.remove(mod_fname)
            if verbose: print "Running from compiled:", mod_name
            d2 = run_module(mod_name) # Read from bytecode
            self.failUnless("x" in d2)
            self.failUnless(d2["x"] == 1)
            del d2 # Ensure __loader__ entry doesn't keep file open
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)
        if verbose: print "Module executed successfully"

    def _add_relative_modules(self, base_dir, source, depth):
        if depth <= 1:
            raise ValueError("Relative module test needs depth > 1")
        pkg_name = "__runpy_pkg__"
        module_dir = base_dir
        for i in range(depth):
            parent_dir = module_dir
            module_dir = os.path.join(module_dir, pkg_name)
        # Add sibling module
        sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
        sibling_file = open(sibling_fname, "w")
        sibling_file.close()
        if verbose: print "  Added sibling module:", sibling_fname
        # Add nephew module
        uncle_dir = os.path.join(parent_dir, "uncle")
        self._add_pkg_dir(uncle_dir)
        if verbose: print "  Added uncle package:", uncle_dir
        cousin_dir = os.path.join(uncle_dir, "cousin")
        self._add_pkg_dir(cousin_dir)
        if verbose: print "  Added cousin package:", cousin_dir
        nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
        nephew_file = open(nephew_fname, "w")
        nephew_file.close()
        if verbose: print "  Added nephew module:", nephew_fname

    def _check_relative_imports(self, depth, run_name=None):
        contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg(contents, depth))
        try:
            self._add_relative_modules(pkg_dir, contents, depth)
            pkg_name = mod_name.rpartition('.')[0]
            if verbose: print "Running from source:", mod_name
            d1 = run_module(mod_name, run_name=run_name) # Read from source
            self.failUnless("__package__" in d1)
            self.failUnless(d1["__package__"] == pkg_name)
            self.failUnless("sibling" in d1)
            self.failUnless("nephew" in d1)
            del d1 # Ensure __loader__ entry doesn't keep file open
            __import__(mod_name)
            os.remove(mod_fname)
            if verbose: print "Running from compiled:", mod_name
            d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
            self.failUnless("__package__" in d2)
            self.failUnless(d2["__package__"] == pkg_name)
            self.failUnless("sibling" in d2)
            self.failUnless("nephew" in d2)
            del d2 # Ensure __loader__ entry doesn't keep file open
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)
        if verbose: print "Module executed successfully"

    def test_run_module(self):
        for depth in range(4):
            if verbose: print "Testing package depth:", depth
            self._check_module(depth)

    def test_explicit_relative_import(self):
        for depth in range(2, 5):
            if verbose: print "Testing relative imports at depth:", depth
            self._check_relative_imports(depth)

    def test_main_relative_import(self):
        for depth in range(2, 5):
            if verbose: print "Testing main relative imports at depth:", depth
            self._check_relative_imports(depth, "__main__")


def test_main():
    run_unittest(RunModuleCodeTest)
    run_unittest(RunModuleTest)

if __name__ == "__main__":
    test_main()
import sibling from ..uncle.cousin import", "d) self.failUnless(sys.argv[0] is saved_argv0) def test_run_module_code(self): initial = object() name", "not in d) self.failUnless(sys.argv[0] is saved_argv0) def test_run_module_code(self): initial =", "os.path.join(parent_dir, \"uncle\") self._add_pkg_dir(uncle_dir) if verbose: print \" Added uncle package:\",", "in os.walk(top, topdown=False): for name in files: try: os.remove(os.path.join(root, name))", "d1 # Ensure __loader__ entry doesn't keep file open __import__(mod_name)", "verbose: print \"Running from source:\", mod_name d1 = run_module(mod_name, run_name=run_name)", "\"cousin\") self._add_pkg_dir(cousin_dir) if verbose: print \" Added cousin package:\", cousin_dir", "import unittest import os import os.path import sys import tempfile", "\"Testing package depth:\", depth self._check_module(depth) def test_explicit_relative_import(self): for depth in", "pkg_fname def _make_pkg(self, source, depth): pkg_name = \"__runpy_pkg__\" test_fname =", "in dirs: fullname = os.path.join(root, name) try: os.rmdir(fullname) except OSError,", "ex # Persist with cleaning up def _check_module(self, depth): pkg_dir,", "class RunModuleCodeTest(unittest.TestCase): expected_result = [\"Top level assignment\", \"Lower level reference\"]", "execution\\n\" \"result = ['Top level assignment']\\n\" \"def f():\\n\" \" result.append('Lower", "\" Removed sys.path entry\" for root, dirs, files in os.walk(top,", "test_fname = \"runpy_test\"+os.extsep+\"py\" pkg_dir = sub_dir = tempfile.mkdtemp() if verbose:", "depth in range(2, 5): if verbose: print \"Testing relative imports", "run_module # Note: This module can't safely test _run_module_as_main as", "is None) self.failUnless(d[\"run_argv0\"] is saved_argv0) self.failUnless(\"run_name\" not in d) self.failUnless(sys.argv[0]", "Read from bytecode self.failUnless(\"x\" in d2) self.failUnless(d2[\"x\"] == 1) del", "if verbose: print \" Next level in:\", sub_dir if verbose:", "print \" Removed package tree\" except OSError, ex: if verbose:", "in range(depth): sub_dir = os.path.join(sub_dir, pkg_name) pkg_fname = self._add_pkg_dir(sub_dir) if", "try: self._add_relative_modules(pkg_dir, contents, depth) pkg_name = mod_name.rpartition('.')[0] if verbose: print", "os.path.join(pkg_dir, \"__init__\"+os.extsep+\"py\") pkg_file = open(pkg_fname, \"w\") pkg_file.close() return pkg_fname def", "\"Module executed successfully\" def test_run_module(self): for depth in range(4): if", "test_main_relative_import(self): for depth in range(2, 5): if verbose: print \"Testing", "source, depth): if depth <= 1: raise ValueError(\"Relative module test", "for \" + mod_name) def test_invalid_names(self): # Builtin module self.expect_import_error(\"sys\")", "from source:\", mod_name d1 = run_module(mod_name) # Read from source", "d1) self.failUnless(d1[\"x\"] == 1) del d1 # Ensure __loader__ entry", "uncle package:\", uncle_dir cousin_dir = os.path.join(uncle_dir, \"cousin\") self._add_pkg_dir(cousin_dir) if verbose:", ") def test_run_code(self): saved_argv0 = sys.argv[0] d = _run_code(self.test_source, {})", "def test_invalid_names(self): # Builtin module self.expect_import_error(\"sys\") # Non-existent modules self.expect_import_error(\"sys.imp.eric\")", "if verbose: print \"Testing main relative imports at depth:\", depth", "__loader__ entry doesn't keep file open finally: self._del_pkg(pkg_dir, depth, mod_name)", "sys.argv[0] d = _run_code(self.test_source, {}) self.failUnless(d[\"result\"] == self.expected_result) 
self.failUnless(d[\"__name__\"] is", "<= 1: raise ValueError(\"Relative module test needs depth > 1\")", "_run_module_as_main as it # runs its tests in the current", "run_module(mod_name) except ImportError: pass else: self.fail(\"Expected import error for \"", "\"def f():\\n\" \" result.append('Lower level reference')\\n\" \"f()\\n\" \"# Check the", "pkg_dir, mod_fname, mod_name def _del_pkg(self, top, depth, mod_name): for entry", "# runs its tests in the current process, which would", "run_module(mod_name, run_name=run_name) # Read from bytecode self.failUnless(\"__package__\" in d2) self.failUnless(d2[\"__package__\"]", "= _run_code(self.test_source, {}) self.failUnless(d[\"result\"] == self.expected_result) self.failUnless(d[\"__name__\"] is None) self.failUnless(d[\"__file__\"]", "del d1 # Ensure __loader__ entry doesn't keep file open", "# Ensure __loader__ entry doesn't keep file open __import__(mod_name) os.remove(mod_fname)", "d1) self.failUnless(\"nephew\" in d1) del d1 # Ensure __loader__ entry", "ValueError(\"Relative module test needs depth > 1\") pkg_name = \"__runpy_pkg__\"", "Test the runpy module import unittest import os import os.path", "+ mod_name) def test_invalid_names(self): # Builtin module self.expect_import_error(\"sys\") # Non-existent", "# real __main__ module (usually test.regrtest) # See test_cmd_line_script for", "os.walk(top, topdown=False): for name in files: try: os.remove(os.path.join(root, name)) except", "print \" Added uncle package:\", uncle_dir cousin_dir = os.path.join(uncle_dir, \"cousin\")", "in range(2, 5): if verbose: print \"Testing relative imports at", "expect_import_error(self, mod_name): try: run_module(mod_name) except ImportError: pass else: self.fail(\"Expected import", "if verbose: print \" Created:\", mod_fname mod_name = (pkg_name+\".\")*depth +", "# Note: This module can't safely test _run_module_as_main as it", "with the # real __main__ module (usually test.regrtest) # See", "in the current process, which would mess with the #", "runpy\\n\" \"nested = runpy._run_module_code('x=1\\\\n', mod_name='<run>')\\n\" ) def test_run_code(self): saved_argv0 =", "Next level in:\", sub_dir if verbose: print \" Created:\", pkg_fname", "mod_name.rpartition('.')[0] if verbose: print \"Running from source:\", mod_name d1 =", "\"w\") sibling_file.close() if verbose: print \" Added sibling module:\", sibling_fname", "sys.path.insert(0, pkg_dir) if verbose: print \" Updated sys.path:\", sys.path[0] for", "cousin_dir = os.path.join(uncle_dir, \"cousin\") self._add_pkg_dir(cousin_dir) if verbose: print \" Added", "module:\", sibling_fname # Add nephew module uncle_dir = os.path.join(parent_dir, \"uncle\")", "test that executes that code path # Set up the", "Created:\", mod_fname mod_name = (pkg_name+\".\")*depth + \"runpy_test\" return pkg_dir, mod_fname,", "real __main__ module (usually test.regrtest) # See test_cmd_line_script for a", "if verbose: print \" Added sibling module:\", sibling_fname # Add", "Persist with cleaning up try: os.rmdir(top) if verbose: print \"", "if verbose: print \" Package tree in:\", sub_dir sys.path.insert(0, pkg_dir)", "sub_dir sys.path.insert(0, pkg_dir) if verbose: print \" Updated sys.path:\", sys.path[0]", "up def _check_module(self, depth): pkg_dir, mod_fname, mod_name = ( self._make_pkg(\"x=1\\n\",", "> 1\") pkg_name = \"__runpy_pkg__\" module_dir = base_dir for i", "print \" Created:\", pkg_fname mod_fname = os.path.join(sub_dir, test_fname) mod_file =", "\"runpy_test\" return pkg_dir, mod_fname, mod_name def 
_del_pkg(self, top, depth, mod_name):", "module self.expect_import_error(\"sys\") # Non-existent modules self.expect_import_error(\"sys.imp.eric\") self.expect_import_error(\"os.path.half\") self.expect_import_error(\"a.bee\") self.expect_import_error(\".howard\") self.expect_import_error(\"..eaten\")", "in d2) del d2 # Ensure __loader__ entry doesn't keep", "run_name=None): contents = r\"\"\"\\ from __future__ import absolute_import from .", "= \"Some other nonsense\" loader = \"Now you're just being", "sibling_file = open(sibling_fname, \"w\") sibling_file.close() if verbose: print \" Added", "range(4): if verbose: print \"Testing package depth:\", depth self._check_module(depth) def", "cleaning up for name in dirs: fullname = os.path.join(root, name)", "pkg_name = \"__runpy_pkg__\" test_fname = \"runpy_test\"+os.extsep+\"py\" pkg_dir = sub_dir =", "pkg_dir = sub_dir = tempfile.mkdtemp() if verbose: print \" Package", "successfully\" def _add_relative_modules(self, base_dir, source, depth): if depth <= 1:", "depth, mod_name) if verbose: print \"Module executed successfully\" def test_run_module(self):", "5): if verbose: print \"Testing relative imports at depth:\", depth", "mod_name d2 = run_module(mod_name, run_name=run_name) # Read from bytecode self.failUnless(\"__package__\"", "import nephew \"\"\" pkg_dir, mod_fname, mod_name = ( self._make_pkg(contents, depth))", "verbose: print \"Running from compiled:\", mod_name d2 = run_module(mod_name, run_name=run_name)", "self.failUnless(d[\"run_argv0\"] is saved_argv0) self.failUnless(\"run_name\" not in d) self.failUnless(sys.argv[0] is saved_argv0)", "Added nephew module:\", nephew_fname def _check_relative_imports(self, depth, run_name=None): contents =", "level module d1 = dict(initial=initial) saved_argv0 = sys.argv[0] d2 =", "in d1) self.failUnless(\"nephew\" in d1) del d1 # Ensure __loader__", "# Persist with cleaning up for name in dirs: fullname", "the test code and expected results class RunModuleCodeTest(unittest.TestCase): expected_result =", "range(2, 5): if verbose: print \"Testing relative imports at depth:\",", "modules self.expect_import_error(\"sys.imp.eric\") self.expect_import_error(\"os.path.half\") self.expect_import_error(\"a.bee\") self.expect_import_error(\".howard\") self.expect_import_error(\"..eaten\") # Package self.expect_import_error(\"logging\") def", "== pkg_name) self.failUnless(\"sibling\" in d1) self.failUnless(\"nephew\" in d1) del d1", "self.failUnless(d2[\"nested\"][\"x\"] == 1) self.failUnless(d2[\"__name__\"] is name) self.failUnless(d2[\"run_name_in_sys_modules\"]) self.failUnless(d2[\"module_in_sys_modules\"]) self.failUnless(d2[\"__file__\"] is", "= self._add_pkg_dir(sub_dir) if verbose: print \" Next level in:\", sub_dir", "mod_fname = os.path.join(sub_dir, test_fname) mod_file = open(mod_fname, \"w\") mod_file.write(source) mod_file.close()", "import os import os.path import sys import tempfile from test.test_support", "sys.path[0] if verbose: print \" Removed sys.path entry\" for root,", "sys.modules) class RunModuleTest(unittest.TestCase): def expect_import_error(self, mod_name): try: run_module(mod_name) except ImportError:", "pkg_file.close() return pkg_fname def _make_pkg(self, source, depth): pkg_name = \"__runpy_pkg__\"", "import sys import tempfile from test.test_support import verbose, run_unittest, forget", "( self._make_pkg(\"x=1\\n\", depth)) forget(mod_name) try: if verbose: print \"Running from", "def test_run_module_code(self): initial = object() name = \"<Nonsense>\" file =", 
"nested operation\\n\" \"import runpy\\n\" \"nested = runpy._run_module_code('x=1\\\\n', mod_name='<run>')\\n\" ) def", "__main__ module (usually test.regrtest) # See test_cmd_line_script for a test", "i in range(depth): sub_dir = os.path.join(sub_dir, pkg_name) pkg_fname = self._add_pkg_dir(sub_dir)", "print \" Added sibling module:\", sibling_fname # Add nephew module", "i in range(depth): parent_dir = module_dir module_dir = os.path.join(module_dir, pkg_name)", "from runpy import _run_code, _run_module_code, run_module # Note: This module", "\" + mod_name) def test_invalid_names(self): # Builtin module self.expect_import_error(\"sys\") #", "sys.path:\", sys.path[0] for i in range(depth): sub_dir = os.path.join(sub_dir, pkg_name)", "if verbose: print \" Removed package tree\" except OSError, ex:", "Package tree in:\", sub_dir sys.path.insert(0, pkg_dir) if verbose: print \"", "open(mod_fname, \"w\") mod_file.write(source) mod_file.close() if verbose: print \" Created:\", mod_fname", "run_module(mod_name, run_name=run_name) # Read from source self.failUnless(\"__package__\" in d1) self.failUnless(d1[\"__package__\"]", "self._check_relative_imports(depth, \"__main__\") def test_main(): run_unittest(RunModuleCodeTest) run_unittest(RunModuleTest) if __name__ == \"__main__\":", "module sibling_fname = os.path.join(module_dir, \"sibling\"+os.extsep+\"py\") sibling_file = open(sibling_fname, \"w\") sibling_file.close()", "being silly\" package = '' # Treat as a top", "\"Module executed successfully\" def _add_relative_modules(self, base_dir, source, depth): if depth", "sys.path[0] for i in range(depth): sub_dir = os.path.join(sub_dir, pkg_name) pkg_fname", "depth) pkg_name = mod_name.rpartition('.')[0] if verbose: print \"Running from source:\",", "This module can't safely test _run_module_as_main as it # runs", "def test_run_code(self): saved_argv0 = sys.argv[0] d = _run_code(self.test_source, {}) self.failUnless(d[\"result\"]", "1) self.failUnless(d2[\"__name__\"] is name) self.failUnless(d2[\"run_name_in_sys_modules\"]) self.failUnless(d2[\"module_in_sys_modules\"]) self.failUnless(d2[\"__file__\"] is file) self.failUnless(d2[\"run_argv0\"]", "error for \" + mod_name) def test_invalid_names(self): # Builtin module", "ex # Persist with cleaning up for name in dirs:", "self.failUnless(d1[\"x\"] == 1) del d1 # Ensure __loader__ entry doesn't", "saved_argv0 = sys.argv[0] d2 = _run_module_code(self.test_source, d1, name, file, loader,", "self.failUnless(d2[\"__file__\"] is file) self.failUnless(d2[\"run_argv0\"] is file) self.failUnless(d2[\"__loader__\"] is loader) self.failUnless(d2[\"__package__\"]", "is None) self.failUnless(d[\"__loader__\"] is None) self.failUnless(d[\"__package__\"] is None) self.failUnless(d[\"run_argv0\"] is", "Builtin module self.expect_import_error(\"sys\") # Non-existent modules self.expect_import_error(\"sys.imp.eric\") self.expect_import_error(\"os.path.half\") self.expect_import_error(\"a.bee\") self.expect_import_error(\".howard\")", "if verbose: print ex # Persist with cleaning up for", "ex # Persist with cleaning up try: os.rmdir(top) if verbose:", "None) self.failUnless(d[\"run_argv0\"] is saved_argv0) self.failUnless(\"run_name\" not in d) self.failUnless(sys.argv[0] is", "name)) except OSError, ex: if verbose: print ex # Persist", "executed successfully\" def _add_relative_modules(self, base_dir, source, depth): if depth <=", "print \" Removed sys.modules entries\" del sys.path[0] if verbose: print", "sibling module sibling_fname = os.path.join(module_dir, 
\"sibling\"+os.extsep+\"py\") sibling_file = open(sibling_fname, \"w\")", "path # Set up the test code and expected results", "cousin_dir nephew_fname = os.path.join(cousin_dir, \"nephew\"+os.extsep+\"py\") nephew_file = open(nephew_fname, \"w\") nephew_file.close()", "nephew module uncle_dir = os.path.join(parent_dir, \"uncle\") self._add_pkg_dir(uncle_dir) if verbose: print", "loader, package) self.failUnless(\"result\" not in d1) self.failUnless(d2[\"initial\"] is initial) self.failUnless(d2[\"result\"]", "package:\", uncle_dir cousin_dir = os.path.join(uncle_dir, \"cousin\") self._add_pkg_dir(cousin_dir) if verbose: print", "5): if verbose: print \"Testing main relative imports at depth:\",", "expected_result = [\"Top level assignment\", \"Lower level reference\"] test_source =", "= os.path.join(module_dir, \"sibling\"+os.extsep+\"py\") sibling_file = open(sibling_fname, \"w\") sibling_file.close() if verbose:", "file, loader, package) self.failUnless(\"result\" not in d1) self.failUnless(d2[\"initial\"] is initial)", "= \"Now you're just being silly\" package = '' #", "= ( self._make_pkg(\"x=1\\n\", depth)) forget(mod_name) try: if verbose: print \"Running", "forget from runpy import _run_code, _run_module_code, run_module # Note: This" ]
[ "full_name='ImageData.image', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\"), message_type=None, enum_type=None,", "index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None,", "full_name='ImageData', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='ImageData.image', index=0, number=1,", "ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict( DESCRIPTOR = _IMAGEDATA, __module__ =", "import message as _message from google.protobuf import reflection as _reflection", "cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='ImageData.image', index=0, number=1, type=12,", "cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", ")) _sym_db.RegisterMessage(PredictionClass) _PREDICTOR = _descriptor.ServiceDescriptor( name='Predictor', full_name='Predictor', file=DESCRIPTOR, index=0, serialized_options=None,", "_descriptor.ServiceDescriptor( name='Predictor', full_name='Predictor', file=DESCRIPTOR, index=0, serialized_options=None, serialized_start=128, serialized_end=188, methods=[ _descriptor.MethodDescriptor(", "\\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA = _descriptor.Descriptor( name='ImageData', full_name='ImageData', filename=None, file=DESCRIPTOR, containing_type=None,", "import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from", "_message from google.protobuf import reflection as _reflection from google.protobuf import", "__module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass',", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output', index=0, number=1, type=2,", "# @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto', package='',", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=19, serialized_end=91, ) _PREDICTIONCLASS =", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=19, serialized_end=91, )", "_descriptor.Descriptor( name='ImageData', full_name='ImageData', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='ImageData.image',", "fields=[ _descriptor.FieldDescriptor( name='image', full_name='ImageData.image', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False,", "= _PREDICTIONCLASS 
_sym_db.RegisterFileDescriptor(DESCRIPTOR) ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict( DESCRIPTOR =", "_IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict(", "DESCRIPTOR = _IMAGEDATA, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData)", "file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3',", "= _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01", "default_value=_b(\"\"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='height',", "= 'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,),", "'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict(", "dict( DESCRIPTOR = _PREDICTIONCLASS, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:PredictionClass) ))", "serialized_start=93, serialized_end=126, ) DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS _sym_db.RegisterFileDescriptor(DESCRIPTOR)", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='height', full_name='ImageData.height', index=1, number=2,", "name='dtype', full_name='ImageData.dtype', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None,", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dtype', full_name='ImageData.dtype', index=3, number=4, type=9,", "_IMAGEDATA = _descriptor.Descriptor( name='ImageData', full_name='ImageData', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor(", "serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False,", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='width', full_name='ImageData.width', index=2, number=3, type=5,", "output_type=_PREDICTIONCLASS, serialized_options=None, ), ]) _sym_db.RegisterServiceDescriptor(_PREDICTOR) DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR # @@protoc_insertion_point(module_scope)", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output', index=0, number=1, type=2, cpp_type=6, label=3,", "index=0, serialized_options=None, serialized_start=128, serialized_end=188, methods=[ _descriptor.MethodDescriptor( name='GetPrediction', full_name='Predictor.GetPrediction', index=0, containing_service=None,", 
"full_name='ImageData.height', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None,", "by the protocol buffer compiler. DO NOT EDIT! # source:", "(lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf", "\\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA = _descriptor.Descriptor( name='ImageData',", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dtype', full_name='ImageData.dtype', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False,", "= 'imagedata_pb2' # @@protoc_insertion_point(class_scope:PredictionClass) )) _sym_db.RegisterMessage(PredictionClass) _PREDICTOR = _descriptor.ServiceDescriptor( name='Predictor',", "_descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor(", ") _IMAGEDATA = _descriptor.Descriptor( name='ImageData', full_name='ImageData', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "= _IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,),", "DO NOT EDIT! 
# source: imagedata.proto import sys _b=sys.version_info[0]<3 and", "label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import", "import reflection as _reflection from google.protobuf import symbol_database as _symbol_database", "serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA = _descriptor.Descriptor(", "as _descriptor from google.protobuf import message as _message from google.protobuf", "index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None,", "default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[", "file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output', index=0, number=1, type=2, cpp_type=6,", "nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=93,", "DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ImageData = _reflection.GeneratedProtocolMessageType('ImageData',", "(_message.Message,), dict( DESCRIPTOR = _IMAGEDATA, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData)", "DESCRIPTOR = _PREDICTIONCLASS, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:PredictionClass) )) _sym_db.RegisterMessage(PredictionClass)", "serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA =", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=93, serialized_end=126, ) DESCRIPTOR.message_types_by_name['ImageData']", "number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\"), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "as _message from google.protobuf import reflection as _reflection from google.protobuf", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='width', full_name='ImageData.width', index=2, number=3, type=5, cpp_type=1, label=1,", "_PREDICTOR = _descriptor.ServiceDescriptor( name='Predictor', 
full_name='Predictor', file=DESCRIPTOR, index=0, serialized_options=None, serialized_start=128, serialized_end=188,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dtype', full_name='ImageData.dtype', index=3, number=4, type=9, cpp_type=9, label=1,", "dict( DESCRIPTOR = _IMAGEDATA, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData) ))", "serialized_options=None, serialized_start=128, serialized_end=188, methods=[ _descriptor.MethodDescriptor( name='GetPrediction', full_name='Predictor.GetPrediction', index=0, containing_service=None, input_type=_IMAGEDATA,", "from google.protobuf import message as _message from google.protobuf import reflection", "_descriptor.FieldDescriptor( name='dtype', full_name='ImageData.dtype', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'),", "containing_service=None, input_type=_IMAGEDATA, output_type=_PREDICTIONCLASS, serialized_options=None, ), ]) _sym_db.RegisterServiceDescriptor(_PREDICTOR) DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR", "number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "# Generated by the protocol buffer compiler. DO NOT EDIT!", "imagedata.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))", "PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict( DESCRIPTOR = _PREDICTIONCLASS, __module__ =", "<gh_stars>1-10 # -*- coding: utf-8 -*- # Generated by the", "serialized_start=19, serialized_end=91, ) _PREDICTIONCLASS = _descriptor.Descriptor( name='PredictionClass', full_name='PredictionClass', filename=None, file=DESCRIPTOR,", "(lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as", "the protocol buffer compiler. DO NOT EDIT! 
# source: imagedata.proto", "oneofs=[ ], serialized_start=93, serialized_end=126, ) DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] =", "= _descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04", "= _IMAGEDATA, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData) PredictionClass", "name='height', full_name='ImageData.height', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None,", "_descriptor.MethodDescriptor( name='GetPrediction', full_name='Predictor.GetPrediction', index=0, containing_service=None, input_type=_IMAGEDATA, output_type=_PREDICTIONCLASS, serialized_options=None, ), ])", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ],", "DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03", "\\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA = _descriptor.Descriptor( name='ImageData', full_name='ImageData', filename=None,", "@@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict( DESCRIPTOR =", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ],", "name='Predictor', full_name='Predictor', file=DESCRIPTOR, index=0, serialized_options=None, serialized_start=128, serialized_end=188, methods=[ _descriptor.MethodDescriptor( name='GetPrediction',", "index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None,", "full_name='Predictor.GetPrediction', index=0, containing_service=None, input_type=_IMAGEDATA, output_type=_PREDICTIONCLASS, serialized_options=None, ), ]) _sym_db.RegisterServiceDescriptor(_PREDICTOR) DESCRIPTOR.services_by_name['Predictor']", "_sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict( DESCRIPTOR = _PREDICTIONCLASS, __module__", "utf-8 -*- # Generated by the protocol buffer compiler. 
DO", "_PREDICTIONCLASS, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:PredictionClass) )) _sym_db.RegisterMessage(PredictionClass) _PREDICTOR =", "serialized_end=188, methods=[ _descriptor.MethodDescriptor( name='GetPrediction', full_name='Predictor.GetPrediction', index=0, containing_service=None, input_type=_IMAGEDATA, output_type=_PREDICTIONCLASS, serialized_options=None,", "import descriptor as _descriptor from google.protobuf import message as _message", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=19, serialized_end=91, ) _PREDICTIONCLASS = _descriptor.Descriptor(", ")) _sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict( DESCRIPTOR = _PREDICTIONCLASS,", "nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=19,", "extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dtype', full_name='ImageData.dtype', index=3,", "name='PredictionClass', full_name='PredictionClass', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output', index=0,", "Generated by the protocol buffer compiler. DO NOT EDIT! #", "x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor", "@@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3',", "full_name='ImageData.width', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None,", "name='output', full_name='PredictionClass.output', index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None,", "message as _message from google.protobuf import reflection as _reflection from", "_reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db", "serialized_end=91, ) _PREDICTIONCLASS = _descriptor.Descriptor( name='PredictionClass', full_name='PredictionClass', filename=None, file=DESCRIPTOR, containing_type=None,", "full_name='PredictionClass.output', index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None,", "compiler. DO NOT EDIT! 
# source: imagedata.proto import sys _b=sys.version_info[0]<3", "oneofs=[ ], serialized_start=19, serialized_end=91, ) _PREDICTIONCLASS = _descriptor.Descriptor( name='PredictionClass', full_name='PredictionClass',", "_symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto',", "(_message.Message,), dict( DESCRIPTOR = _PREDICTIONCLASS, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:PredictionClass)", "_descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output', index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[],", "full_name='Predictor', file=DESCRIPTOR, index=0, serialized_options=None, serialized_start=128, serialized_end=188, methods=[ _descriptor.MethodDescriptor( name='GetPrediction', full_name='Predictor.GetPrediction',", "_descriptor.FieldDescriptor( name='width', full_name='ImageData.width', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None,", "_sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3', serialized_options=None,", "name='imagedata.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3')", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=19, serialized_end=91, ) _PREDICTIONCLASS", "= _descriptor.Descriptor( name='PredictionClass', full_name='PredictionClass', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='output',", "_descriptor.FieldDescriptor( name='height', full_name='ImageData.height', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,", "number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "protocol buffer compiler. DO NOT EDIT! # source: imagedata.proto import", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='height', full_name='ImageData.height', index=1, number=2, type=5, cpp_type=1,", "coding: utf-8 -*- # Generated by the protocol buffer compiler.", "-*- # Generated by the protocol buffer compiler. 
DO NOT", "descriptor as _descriptor from google.protobuf import message as _message from", "index=0, containing_service=None, input_type=_IMAGEDATA, output_type=_PREDICTIONCLASS, serialized_options=None, ), ]) _sym_db.RegisterServiceDescriptor(_PREDICTOR) DESCRIPTOR.services_by_name['Predictor'] =", "google.protobuf import descriptor as _descriptor from google.protobuf import message as", "file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='ImageData.image', index=0, number=1, type=12, cpp_type=9,", "import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR", "number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "NOT EDIT! # source: imagedata.proto import sys _b=sys.version_info[0]<3 and (lambda", "x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import", "_descriptor from google.protobuf import message as _message from google.protobuf import", "name='ImageData', full_name='ImageData', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='image', full_name='ImageData.image', index=0,", "google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default()", "# @@protoc_insertion_point(class_scope:ImageData) )) _sym_db.RegisterMessage(ImageData) PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict( DESCRIPTOR", "reflection as _reflection from google.protobuf import symbol_database as _symbol_database #", "as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports)", "= _descriptor.ServiceDescriptor( name='Predictor', full_name='Predictor', file=DESCRIPTOR, index=0, serialized_options=None, serialized_start=128, serialized_end=188, methods=[", "= _PREDICTIONCLASS, __module__ = 'imagedata_pb2' # @@protoc_insertion_point(class_scope:PredictionClass) )) _sym_db.RegisterMessage(PredictionClass) _PREDICTOR", "package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02 \\x01(\\x05\\x12\\r\\n\\x05width\\x18\\x03 \\x01(\\x05\\x12\\r\\n\\x05\\x64type\\x18\\x04 \\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') )", "from google.protobuf import reflection as _reflection from google.protobuf import symbol_database", "source: imagedata.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda", "\\x01(\\t\\\"!\\n\\x0fPredictionClass\\x12\\x0e\\n\\x06output\\x18\\x01 \\x03(\\x02\\x32<\\n\\tPredictor\\x12/\\n\\rGetPrediction\\x12\\n.ImageData\\x1a\\x10.PredictionClass\\\"\\x00\\x62\\x06proto3') ) _IMAGEDATA = _descriptor.Descriptor( name='ImageData', full_name='ImageData', filename=None, file=DESCRIPTOR,", "google.protobuf import reflection as _reflection from google.protobuf import symbol_database as", "and (lambda x:x) or (lambda 
x:x.encode('latin1')) from google.protobuf import descriptor", "is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='height', full_name='ImageData.height', index=1, number=2, type=5,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='height', full_name='ImageData.height', index=1, number=2, type=5, cpp_type=1, label=1,", "number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False,", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=93, serialized_end=126, )", "type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "serialized_end=126, ) DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS _sym_db.RegisterFileDescriptor(DESCRIPTOR) ImageData", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='width', full_name='ImageData.width',", "has_default_value=False, default_value=_b(\"\"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(", "], serialized_start=93, serialized_end=126, ) DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS", "_reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict( DESCRIPTOR = _IMAGEDATA, __module__ = 'imagedata_pb2' #", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='dtype', full_name='ImageData.dtype',", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='height', full_name='ImageData.height', index=1,", "has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ],", "fields=[ _descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output', index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False,", "_symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='imagedata.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x0fimagedata.proto\\\"H\\n\\tImageData\\x12\\r\\n\\x05image\\x18\\x01 \\x01(\\x0c\\x12\\x0e\\n\\x06height\\x18\\x02", "name='image', full_name='ImageData.image', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\"), message_type=None,", "_descriptor.Descriptor( name='PredictionClass', full_name='PredictionClass', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='output', full_name='PredictionClass.output',", "enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=19, serialized_end=91,", "enum_types=[ ], serialized_options=None, 
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: imagedata.proto

import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='imagedata.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  # only the tail of the serialized_pb byte string survives in the source; the leading bytes are elided
  serialized_pb=_b('...\x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\r\n\x05\x64type\x18\x04 \x01(\t\"!\n\x0fPredictionClass\x12\x0e\n\x06output\x18\x01 \x03(\x02\x32<\n\tPredictor\x12/\n\rGetPrediction\x12\n.ImageData\x1a\x10.PredictionClass\"\x00\x62\x06proto3')
)


_IMAGEDATA = _descriptor.Descriptor(
  name='ImageData',
  full_name='ImageData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='image', full_name='ImageData.image', index=0, number=1,
      type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='height', full_name='ImageData.height', index=1, number=2,
      type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='width', full_name='ImageData.width', index=2, number=3,
      type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dtype', full_name='ImageData.dtype', index=3, number=4,
      type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[],
  nested_types=[],
  enum_types=[],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[],
  serialized_start=19,
  serialized_end=91,
)


_PREDICTIONCLASS = _descriptor.Descriptor(
  name='PredictionClass',
  full_name='PredictionClass',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='output', full_name='PredictionClass.output', index=0, number=1,
      type=2, cpp_type=6, label=3, has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[],
  nested_types=[],
  enum_types=[],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[],
  serialized_start=93,
  serialized_end=126,
)

DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA
DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict(
  DESCRIPTOR=_IMAGEDATA,
  __module__='imagedata_pb2'
  # @@protoc_insertion_point(class_scope:ImageData)
))
_sym_db.RegisterMessage(ImageData)

PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict(
  DESCRIPTOR=_PREDICTIONCLASS,
  __module__='imagedata_pb2'
  # @@protoc_insertion_point(class_scope:PredictionClass)
))
_sym_db.RegisterMessage(PredictionClass)


_PREDICTOR = _descriptor.ServiceDescriptor(
  name='Predictor',
  full_name='Predictor',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=128,
  serialized_end=188,
  methods=[
    _descriptor.MethodDescriptor(
      name='GetPrediction',
      full_name='Predictor.GetPrediction',
      index=0,
      containing_service=None,
      input_type=_IMAGEDATA,
      output_type=_PREDICTIONCLASS,
      serialized_options=None,
    ),
  ])
_sym_db.RegisterServiceDescriptor(_PREDICTOR)

DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR

# @@protoc_insertion_point(module_scope)
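# --- Usage sketch (added illustration; the numpy array and values are made up) ---
# The generated ImageData / PredictionClass messages can be filled with keyword
# arguments and serialized to bytes, e.g. as the request type of the
# Predictor.GetPrediction RPC declared above.
import numpy as np
import imagedata_pb2

frame = np.zeros((224, 224, 3), dtype=np.uint8)   # placeholder image
request = imagedata_pb2.ImageData(
    image=frame.tobytes(),
    height=frame.shape[0],
    width=frame.shape[1],
    dtype=str(frame.dtype),
)
payload = request.SerializeToString()             # bytes ready to send over the wire

decoded = imagedata_pb2.ImageData.FromString(payload)
assert decoded.height == 224 and decoded.dtype == 'uint8'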
# app/api/admin_sales/discounted.py
from sqlalchemy import func
from flask_rest_jsonapi import ResourceList
from marshmallow_jsonapi import fields
from marshmallow_jsonapi.flask import Schema

from app.api.helpers.utilities import dasherize
from app.api.bootstrap import api
from app.models import db
from app.models.discount_code import DiscountCode
from app.models.event import Event
from app.models.order import Order, OrderTicket
from app.models.user import User


def sales_per_marketer_and_discount_by_status(status):
    return db.session.query(Event.id.label('event_id'),
                            DiscountCode.id.label('discount_code_id'),
                            User.id.label('marketer_id'),
                            func.sum(Order.amount).label(status + '_sales'),
                            func.sum(OrderTicket.quantity).label(status + '_tickets')) \
                     .filter(Event.id == Order.event_id) \
                     .filter(Order.marketer_id == User.id) \
                     .filter(Order.discount_code_id == DiscountCode.id) \
                     .filter(Order.status == status) \
                     .group_by(Event) \
                     .group_by(DiscountCode) \
                     .group_by(User) \
                     .group_by(Order.status) \
                     .cte()


class AdminSalesDiscountedSchema(Schema):
    """
    Discounted sales by event

    Provides Event name, discount code, marketer mail, count of tickets and
    total sales for orders grouped by status
    """

    class Meta:
        type_ = 'admin-sales-discounted'
        self_view = 'v1.admin_sales_discounted'
        inflect = dasherize

    id = fields.String()
    code = fields.String()
    email = fields.String()
    event_name = fields.String()
    payment_currency = fields.String()
    sales = fields.Method('calc_sales')

    @staticmethod
    def calc_sales(obj):
        """
        Returns sales (dictionary with total sales and ticket count) for
        placed, completed and pending orders
        """
        res = {'placed': {}, 'completed': {}, 'pending': {}}
        res['placed']['sales_total'] = obj.placed_sales or 0
        res['placed']['ticket_count'] = obj.placed_tickets or 0
        res['completed']['sales_total'] = obj.completed_sales or 0
        res['completed']['ticket_count'] = obj.completed_tickets or 0
        res['pending']['sales_total'] = obj.pending_sales or 0
        res['pending']['ticket_count'] = obj.pending_tickets or 0

        return res


class AdminSalesDiscountedList(ResourceList):
    """
    Resource for sales by marketer. Joins event marketer and orders and
    subsequently accumulates sales by status
    """

    def query(self, _):
        pending = sales_per_marketer_and_discount_by_status('pending')
        completed = sales_per_marketer_and_discount_by_status('completed')
        placed = sales_per_marketer_and_discount_by_status('placed')

        discounts = self.session.query(Event.id.label('event_id'),
                                       Event.name.label('event_name'),
                                       DiscountCode.id.label('discount_code_id'),
                                       DiscountCode.code.label('code'),
                                       User.id.label('marketer_id'),
                                       User.email.label('email')) \
                                .filter(Event.id == Order.event_id) \
                                .filter(Order.marketer_id == User.id) \
                                .filter(Order.discount_code_id == DiscountCode.id) \
                                .cte()

        return self.session.query(discounts, pending, completed, placed) \
                           .outerjoin(pending,
                                      (pending.c.event_id == discounts.c.event_id) &
                                      (pending.c.discount_code_id == discounts.c.discount_code_id) &
                                      (pending.c.marketer_id == discounts.c.marketer_id)) \
                           .outerjoin(completed,
                                      (completed.c.event_id == discounts.c.event_id) &
                                      (completed.c.discount_code_id == discounts.c.discount_code_id) &
                                      (completed.c.marketer_id == discounts.c.marketer_id)) \
                           .outerjoin(placed,
                                      (placed.c.event_id == discounts.c.event_id) &
                                      (placed.c.discount_code_id == discounts.c.discount_code_id) &
                                      (placed.c.marketer_id == discounts.c.marketer_id))

    methods = ['GET']
    decorators = (api.has_permission('is_admin'), )
    schema = AdminSalesDiscountedSchema
    data_layer = {
        'model': Event,
        'session': db.session,
        'methods': {
            'query': query
        }
    }
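# --- Usage sketch (added illustration, not part of the original module) ---
# calc_sales() only reads the aggregated columns from a query-result row, so its
# output shape can be shown with a stand-in object; the numbers below are made up.
from types import SimpleNamespace

from app.api.admin_sales.discounted import AdminSalesDiscountedSchema

row = SimpleNamespace(placed_sales=120.0, placed_tickets=4,
                      completed_sales=None, completed_tickets=None,
                      pending_sales=30.0, pending_tickets=1)
print(AdminSalesDiscountedSchema.calc_sales(row))
# {'placed': {'sales_total': 120.0, 'ticket_count': 4},
#  'completed': {'sales_total': 0, 'ticket_count': 0},
#  'pending': {'sales_total': 30.0, 'ticket_count': 1}}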
from .stop_words import STOP_WORDS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .lex_attrs import LEX_ATTRS
from ...language import Language


class SerbianDefaults(Language.Defaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS


class Serbian(Language):
    lang = "sr"
    Defaults = SerbianDefaults


__all__ = ["Serbian"]
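# --- Usage sketch (assumes a spaCy install where this package lives at spacy.lang.sr) ---
from spacy.lang.sr import Serbian

nlp = Serbian()                      # blank "sr" pipeline: tokenizer only, no trained components
doc = nlp("Ovo je rečenica.")        # "This is a sentence."
print([token.text for token in doc])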
from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv, DeformConvPack,
                                  ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
                                  ModulatedDeformRoIPoolingPack)

__all__ = [
    'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack',
    'DeformRoIPooling', 'DeformRoIPoolingPack', 'ModulatedDeformRoIPoolingPack',
    'deform_conv', 'modulated_deform_conv', 'deform_roi_pooling'
]
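# --- Usage sketch (assumptions: mmdetection-style layout where this package is
# importable as mmdet.ops.dcn, a DeformConvPack(in_channels, out_channels,
# kernel_size, padding, deformable_groups) signature, and compiled CUDA extensions) ---
import torch
from mmdet.ops.dcn import DeformConvPack  # import path assumed, see note above

# DeformConvPack predicts its own sampling offsets, so it can stand in for a
# regular 3x3 convolution inside a backbone or detection head.
layer = DeformConvPack(64, 64, kernel_size=3, padding=1, deformable_groups=1).cuda()
x = torch.randn(2, 64, 32, 32, device='cuda')
print(layer(x).shape)  # expected: torch.Size([2, 64, 32, 32])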
# coding: utf-8

from rest_framework import serializers
from data.models import Skill, SkillValue


class ListSerializer(serializers.ModelSerializer):
    skill_value_list = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Skill
        fields = [
            'skill_id',
            'target_unit',
            'target_member',
            'target_type',
            'target_num',
            'target_param',
            'skill_value_id',
            'skill_value_list',
            'comment'
        ]

    @staticmethod
    def get_skill_value_list(obj):
        return SkillValue.get_value_list(obj.skill_value_id)


class Costar(object):
    def __init__(self, name, count):
        self.name = name
        self.count = count


class CostarSerializer(serializers.Serializer):
    name = serializers.CharField(max_length=255)
    count = serializers.IntegerField()

    def create(self, validated_data):
        return Costar(**validated_data)

    def update(self, instance, validated_data):
        instance.name = validated_data.get('name', instance.name)
        instance.count = validated_data.get('count', instance.count)
        return instance
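# --- Usage sketch (illustrative values; assumes a configured Django/DRF project
# in which the serializers above are importable) ---
serializer = CostarSerializer(data={'name': 'Alice', 'count': 3})
if serializer.is_valid():
    costar = serializer.save()            # calls create() above and returns a Costar instance
    print(costar.name, costar.count)      # Alice 3

# Serializing an existing in-memory object back to primitive data:
print(CostarSerializer(Costar('Bob', 5)).data)  # {'name': 'Bob', 'count': 5}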
# File: Converting_RGB_to_GreyScale.py
# Description: Opening RGB image as array, converting to GreyScale and saving result into new file
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 <NAME>
# github.com/sichkar-valentyn
#
# Reference to:
# <NAME>. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603


# Opening RGB image as array, converting to GreyScale and saving result into new file

# Importing needed libraries
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import color
from skimage import io
import scipy.misc

# Creating an array from image data
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
# Checking the type of the array
print(type(image_np))  # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_np.shape)

# Showing image with every channel separately
channel_R = image_np[:, :, 0]
channel_G = image_np[:, :, 1]
channel_B = image_np[:, :, 2]

# Creating a figure with subplots
f, ax = plt.subplots(nrows=2, ncols=2)
# ax is (2, 2) np array and to make it easier to read we use 'flatten' function
# Or we can call each time ax[0, 0]
ax0, ax1, ax2, ax3 = ax.flatten()

# Adjusting first subplot
ax0.imshow(channel_R, cmap='Reds')
ax0.set_xlabel('')
ax0.set_ylabel('')
ax0.set_title('Red channel')

# Adjusting second subplot
ax1.imshow(channel_G, cmap='Greens')
ax1.set_xlabel('')
ax1.set_ylabel('')
ax1.set_title('Green channel')

# Adjusting third subplot
ax2.imshow(channel_B, cmap='Blues')
ax2.set_xlabel('')
ax2.set_ylabel('')
ax2.set_title('Blue channel')

# Adjusting fourth subplot
ax3.imshow(image_np)
ax3.set_xlabel('')
ax3.set_ylabel('')
ax3.set_title('Original image')

# Function to make distance between figures
plt.tight_layout()
# Giving the name to the window with figure
f.canvas.set_window_title('Eagle image in three channels R, G and B')
# Showing the plots
plt.show()

# Converting RGB image into GrayScale image
# Using formula:
# Y' = 0.299 R + 0.587 G + 0.114 B
image_RGB = Image.open("images/eagle.jpg")
image_np = np.array(image_RGB)
image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114
# Checking the type of the array
print(type(image_GreyScale))  # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()

# Preparing array for saving - creating three channels with the same data in each
# Firstly, creating array with zero elements
# And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple
# Now the shape will be (1080, 1920, 3) - which is tuple type
image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3]))
# Secondly, reshaping GreyScale image from 2D to 3D
x = image_GreyScale.reshape((1080, 1920, 1))
# Finally, writing all data in three channels
image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0]
image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0]
# Saving image into a file from processed array
scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels)

# Checking that image was written with three channels and they are identical
result_1 = Image.open("images/result_1.jpg")
result_1_np = np.array(result_1)
print(result_1_np.shape)
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1]))
print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2]))

# Showing saved resulted image
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Here we don't need to specify the map like cmap='Greys'
plt.imshow(result_1_np)
plt.show()

# Another way to convert RGB image into GreyScale image
image_RGB = io.imread("images/eagle.jpg")
image_GreyScale = color.rgb2gray(image_RGB)
# Checking the type of the array
print(type(image_GreyScale))  # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_GreyScale, cmap='Greys')
plt.show()
# Saving converted image into a file from processed array
scipy.misc.imsave("images/result_2.jpg", image_GreyScale)

# One more way for converting
image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True)
# Checking the type of the array
print(type(image_RGB_as_GreyScale))  # <class 'numpy.ndarray'>
# Checking the shape of the array
print(image_RGB_as_GreyScale.shape)
# Giving the name to the window with figure
plt.figure('GreyScaled image from RGB')
# Showing the image by using obtained array
plt.imshow(image_RGB_as_GreyScale, cmap='Greys')
plt.show()
the type of the array print(type(image_GreyScale)) #", "using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving converted image", "GreyScale and saving result into new file # Environment: PyCharm", "Y' = 0.299 R + 0.587 G + 0.114 B", "three channels with the same data in each # Firstly,", "to GreyScale and saving result into new file # Environment:", "image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :,", "3D x = image_GreyScale.reshape((1080, 1920, 1)) # Finally, writing all", "three channels and they are identical result_1 = Image.open(\"images/result_1.jpg\") result_1_np", "Showing the plots plt.show() # Converting RGB image into GrayScale", "= image_np[:, :, 2] # Creating a figure with subplots", "# Description: Opening RGB image as array, converting to GreyScale", "shape will be (1080, 1920, 3) - which is tuple", "element '3' to the tuple # Now the shape will", "more element '3' to the tuple # Now the shape", "# # Reference to: # <NAME>. Image processing in Python", "result_1_np = np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1]))", "0] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 1] = x[:,", "result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2])) #", "# Checking that image was written with three channels and", "elements # And by 'image_GreyScale.shape + tuple([3])' we add one", "np.array(image_RGB) # Checking the type of the array print(type(image_np)) #", "the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape", ":, 1])) print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2])) # Showing", "window with figure plt.figure('GreyScaled image from RGB') # Here we", "in each # Firstly, creating array with zero elements #", "np.zeros(image_GreyScale.shape + tuple([3])) # Secondly, reshaping GreyScale image from 2D", "2018 <NAME> # github.com/sichkar-valentyn # # Reference to: # <NAME>.", "array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of", "Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603 #", "subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting fourth", "image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3])) # Secondly, reshaping GreyScale image", "Giving the name to the window with figure f.canvas.set_window_title('Eagle image", "plt.show() # Preparing array for saving - creating three channels", "'3' to the tuple # Now the shape will be", "ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('')", "of the array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'> # Checking the", "# github.com/sichkar-valentyn # # Reference to: # <NAME>. 
Image processing", "plots plt.show() # Converting RGB image into GrayScale image #", "result_1_np[:, :, 2])) # Showing saved resulted image # Giving", "figure plt.figure('GreyScaled image from RGB') # Showing the image by", "ax3 = ax.flatten() # Adjusting first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('')", "= io.imread(\"images/eagle.jpg\", as_gray=True) # Checking the type of the array", "plt.show() # Saving converted image into a file from processed", "MIT License # Copyright (c) 2018 <NAME> # github.com/sichkar-valentyn #", "Image import matplotlib.pyplot as plt from skimage import color from", "the name to the window with figure plt.figure('GreyScaled image from", "image_GreyScale_with_3_channels) # Checking that image was written with three channels", "Using formula: # Y' = 0.299 R + 0.587 G", "cmap='Greys' plt.imshow(result_1_np) plt.show() # Another way to convert RGB image", "image_np[:, :, 2] # Creating a figure with subplots f,", "= Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) image_GreyScale = image_np[:, :, 0]", "from RGB') # Here we don't need to specify the", "three channels R, G and B') # Showing the plots", "1], result_1_np[:, :, 2])) # Showing saved resulted image #", "environment # # MIT License # Copyright (c) 2018 <NAME>", "more way for converting image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\", as_gray=True) # Checking", "scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking that image was written with three", "the window with figure plt.figure('GreyScaled image from RGB') # Here", "ax is (2, 2) np array and to make it", "array with zero elements # And by 'image_GreyScale.shape + tuple([3])'", "into a file from processed array scipy.misc.imsave(\"images/result_2.jpg\", image_GreyScale) # One", "Preparing array for saving - creating three channels with the", "into a file from obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) #", "image_np[:, :, 0] * 0.299 + image_np[:, :, 1] *", "image with every channel separately channel_R = image_np[:, :, 0]", "2) np array and to make it easier to read", "PIL import Image import matplotlib.pyplot as plt from skimage import", "Checking the shape of the array print(image_np.shape) # Showing image", "(c) 2018 <NAME> # github.com/sichkar-valentyn # # Reference to: #", "ax3.imshow(image_np) ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original image') # Function to make distance", "Reference to: # <NAME>. Image processing in Python // GitHub", "1)) # Finally, writing all data in three channels image_GreyScale_with_3_channels[:,", "# Creating an array from image data image_RGB = Image.open(\"images/eagle.jpg\")", "don't need to specify the map like cmap='Greys' plt.imshow(result_1_np) plt.show()", "(2, 2) np array and to make it easier to", "# Reference to: # <NAME>. 
Image processing in Python //", "Showing image with every channel separately channel_R = image_np[:, :,", "array print(image_np.shape) # Showing image with every channel separately channel_R", "to read we use 'flatten' function # Or we can", "array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of", "file # Environment: PyCharm and Anaconda environment # # MIT", "the array print(image_GreyScale.shape) # Giving the name to the window", "Anaconda environment # # MIT License # Copyright (c) 2018", "ax2.set_title('Blue channel') # Adjusting fourth subplot ax3.imshow(image_np) ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original", "formula: # Y' = 0.299 R + 0.587 G +", "the shape of the array print(image_RGB_as_GreyScale.shape) # Giving the name", "all data in three channels image_GreyScale_with_3_channels[:, :, 0] = x[:,", "= image_np[:, :, 0] channel_G = image_np[:, :, 1] channel_B", "figure with subplots f, ax = plt.subplots(nrows=2, ncols=2) # ax", "is (2, 2) np array and to make it easier", "image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\", as_gray=True) # Checking the type of the", "2D to 3D x = image_GreyScale.reshape((1080, 1920, 1)) # Finally,", "tuple([3])) # Secondly, reshaping GreyScale image from 2D to 3D", "0.114 B image_RGB = Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) image_GreyScale =", "<class 'numpy.ndarray'> # Checking the shape of the array print(image_RGB_as_GreyScale.shape)", "Saving converted image into a file from processed array scipy.misc.imsave(\"images/result_2.jpg\",", "= Image.open(\"images/result_1.jpg\") result_1_np = np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:, :, 0], result_1_np[:,", "io.imread(\"images/eagle.jpg\", as_gray=True) # Checking the type of the array print(type(image_RGB_as_GreyScale))", "ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting fourth subplot ax3.imshow(image_np) ax3.set_xlabel('')", "import matplotlib.pyplot as plt from skimage import color from skimage", "image_RGB = Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) # Checking the type", "of the array print(type(image_np)) # <class 'numpy.ndarray'> # Checking the", "ax3.set_ylabel('') ax3.set_title('Original image') # Function to make distance between figures", "image_np = np.array(image_RGB) # Checking the type of the array", "into GreyScale image image_RGB = io.imread(\"images/eagle.jpg\") image_GreyScale = color.rgb2gray(image_RGB) #", "# Adjusting fourth subplot ax3.imshow(image_np) ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original image') #", "file # Importing needed libraries import numpy as np from", "channels R, G and B') # Showing the plots plt.show()", "saving result into new file # Environment: PyCharm and Anaconda", "# Showing the image by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys')", "shape of the array print(image_GreyScale.shape) # Giving the name to", "ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original image') # Function to make distance between", "image_np[:, :, 0] channel_G = image_np[:, :, 1] channel_B =", "type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3])) # Secondly, reshaping GreyScale", "<class 'numpy.ndarray'> # Checking the shape of the array print(image_GreyScale.shape)", "ax[0, 0] ax0, ax1, ax2, ax3 = ax.flatten() # Adjusting", "+ image_np[:, :, 2] * 0.114 # Checking the type", "needed libraries import numpy as np from PIL import 
Image", "ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting third subplot ax2.imshow(channel_B, cmap='Blues')", "way for converting image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\", as_gray=True) # Checking the", "Adjusting third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') #", "# Function to make distance between figures plt.tight_layout() # Giving", "need to specify the map like cmap='Greys' plt.imshow(result_1_np) plt.show() #", "plt.show() # Converting RGB image into GrayScale image # Using", "Converting RGB image into GrayScale image # Using formula: #", "# Finally, writing all data in three channels image_GreyScale_with_3_channels[:, :,", "the type of the array print(type(image_np)) # <class 'numpy.ndarray'> #", "writing all data in three channels image_GreyScale_with_3_channels[:, :, 0] =", "print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2])) # Showing saved resulted", "'flatten' function # Or we can call each time ax[0,", "the image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() #", "Showing saved resulted image # Giving the name to the", "name to the window with figure plt.figure('GreyScaled image from RGB')", "RGB image as array, converting to GreyScale and saving result", "numpy as np from PIL import Image import matplotlib.pyplot as", "Checking the type of the array print(type(image_np)) # <class 'numpy.ndarray'>", ":, 1] channel_B = image_np[:, :, 2] # Creating a", "array print(image_GreyScale.shape) # Giving the name to the window with", "RGB') # Here we don't need to specify the map", "github.com/sichkar-valentyn # # Reference to: # <NAME>. Image processing in", "* 0.114 # Checking the type of the array print(type(image_GreyScale))", "platform. 
DOI: 10.5281/zenodo.1343603 # Opening RGB image as array, converting", "saved resulted image # Giving the name to the window", "of the array print(image_GreyScale.shape) # Giving the name to the", "we don't need to specify the map like cmap='Greys' plt.imshow(result_1_np)", "# And by 'image_GreyScale.shape + tuple([3])' we add one more", "ax = plt.subplots(nrows=2, ncols=2) # ax is (2, 2) np", "# ax is (2, 2) np array and to make", "resulted image # Giving the name to the window with", "# Using formula: # Y' = 0.299 R + 0.587", "to specify the map like cmap='Greys' plt.imshow(result_1_np) plt.show() # Another", "every channel separately channel_R = image_np[:, :, 0] channel_G =", "obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving converted image into", "that image was written with three channels and they are", "= ax.flatten() # Adjusting first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('')", "# Giving the name to the window with figure f.canvas.set_window_title('Eagle", "channels image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0] image_GreyScale_with_3_channels[:, :,", "image_np[:, :, 2] * 0.114 # Checking the type of", "into new file # Environment: PyCharm and Anaconda environment #", "'numpy.ndarray'> # Checking the shape of the array print(image_np.shape) #", "the name to the window with figure f.canvas.set_window_title('Eagle image in", "libraries import numpy as np from PIL import Image import", "# Adjusting first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel')", "Adjusting first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') #", "converting image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\", as_gray=True) # Checking the type of", "# Showing the plots plt.show() # Converting RGB image into", "PyCharm and Anaconda environment # # MIT License # Copyright", "// GitHub platform. 
DOI: 10.5281/zenodo.1343603 # Opening RGB image as", "array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Preparing array for saving -", ":, 1], result_1_np[:, :, 2])) # Showing saved resulted image", "# Now the shape will be (1080, 1920, 3) -", "channel_R = image_np[:, :, 0] channel_G = image_np[:, :, 1]", "name to the window with figure f.canvas.set_window_title('Eagle image in three", "2])) # Showing saved resulted image # Giving the name", "# Checking the type of the array print(type(image_RGB_as_GreyScale)) # <class", "a file from obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking", "channels with the same data in each # Firstly, creating", "the array print(image_np.shape) # Showing image with every channel separately", "# Firstly, creating array with zero elements # And by", "obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Preparing array for saving", "Showing the image by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show()", "ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting fourth subplot", "Showing the image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show()", "Checking the shape of the array print(image_RGB_as_GreyScale.shape) # Giving the", "ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting second subplot", "image by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving", "Creating an array from image data image_RGB = Image.open(\"images/eagle.jpg\") image_np", "as plt from skimage import color from skimage import io", "# Saving image into a file from obtained 3D array", "Environment: PyCharm and Anaconda environment # # MIT License #", "we add one more element '3' to the tuple #", "plt from skimage import color from skimage import io import", "print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the", "tuple([3])' we add one more element '3' to the tuple", "plt.imshow(result_1_np) plt.show() # Another way to convert RGB image into", "image_GreyScale.reshape((1080, 1920, 1)) # Finally, writing all data in three", "3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking that image was written", "saving - creating three channels with the same data in", "the array print(image_RGB_as_GreyScale.shape) # Giving the name to the window", "0] # Saving image into a file from obtained 3D", "obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking that image was", "# Checking the shape of the array print(image_RGB_as_GreyScale.shape) # Giving", "# Checking the type of the array print(type(image_np)) # <class", "Or we can call each time ax[0, 0] ax0, ax1,", "image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 1]", "RGB') # Showing the image by using obtained array plt.imshow(image_GreyScale,", "array, converting to GreyScale and saving result into new file", "read we use 'flatten' function # Or we can call", "# Showing saved resulted image # Giving the name to", "One more way for converting image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\", as_gray=True) #", ":, 0] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 1] =", "1] * 0.587 + image_np[:, :, 2] * 0.114 #", "GrayScale image # Using formula: # Y' = 0.299 R", 
"plt.figure('GreyScaled image from RGB') # Showing the image by using", "Secondly, reshaping GreyScale image from 2D to 3D x =", "image as array, converting to GreyScale and saving result into", "ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting third subplot", "channel_G = image_np[:, :, 1] channel_B = image_np[:, :, 2]", "+ tuple([3])) # Secondly, reshaping GreyScale image from 2D to", "subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting second", "image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 2]", "image image_RGB = io.imread(\"images/eagle.jpg\") image_GreyScale = color.rgb2gray(image_RGB) # Checking the", "Adjusting second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') #", "= x[:, :, 0] image_GreyScale_with_3_channels[:, :, 1] = x[:, :,", "# Environment: PyCharm and Anaconda environment # # MIT License", "# Or we can call each time ax[0, 0] ax0,", "map like cmap='Greys' plt.imshow(result_1_np) plt.show() # Another way to convert", "image_np[:, :, 1] * 0.587 + image_np[:, :, 2] *", "Saving converted image into a file from processed array scipy.misc.imsave(\"images/result_3.jpg\",", "array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving converted image into a", "+ 0.114 B image_RGB = Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) image_GreyScale", "Checking the type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'>", "array print(type(image_np)) # <class 'numpy.ndarray'> # Checking the shape of", "for saving - creating three channels with the same data", "# Showing image with every channel separately channel_R = image_np[:,", "= np.array(image_RGB) # Checking the type of the array print(type(image_np))", "ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting second subplot ax1.imshow(channel_G, cmap='Greens')", "second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting", "the array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape", "ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('')", "# Adjusting second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel')", "# File: Converting_RGB_to_GreyScale.py # Description: Opening RGB image as array,", "print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:, :, 1], result_1_np[:,", "# Converting RGB image into GrayScale image # Using formula:", "print(image_RGB_as_GreyScale.shape) # Giving the name to the window with figure", "Here we don't need to specify the map like cmap='Greys'", "channel') # Adjusting fourth subplot ax3.imshow(image_np) ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original image')", "zero elements # And by 'image_GreyScale.shape + tuple([3])' we add", "import numpy as np from PIL import Image import matplotlib.pyplot", "like cmap='Greys' plt.imshow(result_1_np) plt.show() # Another way to convert RGB", "to make it easier to read we use 'flatten' function", "third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting", 
"plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving converted image into a file", "io.imread(\"images/eagle.jpg\") image_GreyScale = color.rgb2gray(image_RGB) # Checking the type of the", "G and B') # Showing the plots plt.show() # Converting", "Python // GitHub platform. DOI: 10.5281/zenodo.1343603 # Opening RGB image", "= 0.299 R + 0.587 G + 0.114 B image_RGB", "type of the array print(type(image_np)) # <class 'numpy.ndarray'> # Checking", "GreyScale and saving result into new file # Importing needed", "= x[:, :, 0] image_GreyScale_with_3_channels[:, :, 2] = x[:, :,", "and they are identical result_1 = Image.open(\"images/result_1.jpg\") result_1_np = np.array(result_1)", "converted image into a file from processed array scipy.misc.imsave(\"images/result_2.jpg\", image_GreyScale)", "# Adjusting third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel')", "of the array print(image_RGB_as_GreyScale.shape) # Giving the name to the", "image in three channels R, G and B') # Showing", "DOI: 10.5281/zenodo.1343603 # Opening RGB image as array, converting to", "Checking that image was written with three channels and they", "the shape of the array print(image_GreyScale.shape) # Giving the name", "by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving converted", "Firstly, creating array with zero elements # And by 'image_GreyScale.shape", "- creating three channels with the same data in each", "image') # Function to make distance between figures plt.tight_layout() #", "image into a file from obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels)", "in three channels image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0]", "<NAME> # github.com/sichkar-valentyn # # Reference to: # <NAME>. 
Image", "are identical result_1 = Image.open(\"images/result_1.jpg\") result_1_np = np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:,", "tuple # Now the shape will be (1080, 1920, 3)", "+ 0.587 G + 0.114 B image_RGB = Image.open(\"images/eagle.jpg\") image_np", "scipy.misc # Creating an array from image data image_RGB =", "RGB image into GrayScale image # Using formula: # Y'", "by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving converted", "Importing needed libraries import numpy as np from PIL import", "converting to GreyScale and saving result into new file #", "file from processed array scipy.misc.imsave(\"images/result_2.jpg\", image_GreyScale) # One more way", "to GreyScale and saving result into new file # Importing", "window with figure plt.figure('GreyScaled image from RGB') # Showing the", "Description: Opening RGB image as array, converting to GreyScale and", "= np.array(image_RGB) image_GreyScale = image_np[:, :, 0] * 0.299 +", "figure f.canvas.set_window_title('Eagle image in three channels R, G and B')", "= Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) # Checking the type of", "image # Using formula: # Y' = 0.299 R +", "# MIT License # Copyright (c) 2018 <NAME> # github.com/sichkar-valentyn", "use 'flatten' function # Or we can call each time", "array scipy.misc.imsave(\"images/result_2.jpg\", image_GreyScale) # One more way for converting image_RGB_as_GreyScale", ":, 2] # Creating a figure with subplots f, ax", "plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving converted image into a file", "we use 'flatten' function # Or we can call each", "shape of the array print(image_RGB_as_GreyScale.shape) # Giving the name to", "convert RGB image into GreyScale image image_RGB = io.imread(\"images/eagle.jpg\") image_GreyScale", "and saving result into new file # Environment: PyCharm and", "image into a file from processed array scipy.misc.imsave(\"images/result_2.jpg\", image_GreyScale) #", "# Checking the type of the array print(type(image_GreyScale)) # <class", "array print(image_RGB_as_GreyScale.shape) # Giving the name to the window with", "of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the", "(1080, 1920, 3) - which is tuple type image_GreyScale_with_3_channels =", "0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :,", "in three channels R, G and B') # Showing the", "channel') # Adjusting third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue", "and Anaconda environment # # MIT License # Copyright (c)", "type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking", "new file # Environment: PyCharm and Anaconda environment # #", "to the tuple # Now the shape will be (1080,", "the map like cmap='Greys' plt.imshow(result_1_np) plt.show() # Another way to", "to make distance between figures plt.tight_layout() # Giving the name", "File: Converting_RGB_to_GreyScale.py # Description: Opening RGB image as array, converting", "RGB') # Showing the image by using obtained array plt.imshow(image_RGB_as_GreyScale,", "print(image_np.shape) # Showing image with every channel separately channel_R =", "the shape of the array print(image_np.shape) # Showing image with", "Image.open(\"images/result_1.jpg\") result_1_np = np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :,", "# Y' = 0.299 R + 0.587 G + 0.114", "ax3.set_title('Original image') 
# Function to make distance between figures plt.tight_layout()", "image_GreyScale) # One more way for converting image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\",", "function # Or we can call each time ax[0, 0]", "can call each time ax[0, 0] ax0, ax1, ax2, ax3", "the type of the array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'> #", "<NAME>. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603", "np from PIL import Image import matplotlib.pyplot as plt from", "data in each # Firstly, creating array with zero elements", "Finally, writing all data in three channels image_GreyScale_with_3_channels[:, :, 0]", "R, G and B') # Showing the plots plt.show() #", "image from RGB') # Showing the image by using obtained", "with figure f.canvas.set_window_title('Eagle image in three channels R, G and", ":, 0] image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0] #", "distance between figures plt.tight_layout() # Giving the name to the", "to the window with figure f.canvas.set_window_title('Eagle image in three channels", "tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3])) # Secondly, reshaping", "into new file # Importing needed libraries import numpy as", "for converting image_RGB_as_GreyScale = io.imread(\"images/eagle.jpg\", as_gray=True) # Checking the type", "with figure plt.figure('GreyScaled image from RGB') # Here we don't", "using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Preparing array for", "array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking that image was written with", "image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving", "which is tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3])) #", "image data image_RGB = Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) # Checking", "first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting", "channel_B = image_np[:, :, 2] # Creating a figure with", "as array, converting to GreyScale and saving result into new", "creating array with zero elements # And by 'image_GreyScale.shape +", "= x[:, :, 0] # Saving image into a file", "GreyScale image from 2D to 3D x = image_GreyScale.reshape((1080, 1920,", "call each time ax[0, 0] ax0, ax1, ax2, ax3 =", "be (1080, 1920, 3) - which is tuple type image_GreyScale_with_3_channels", "import scipy.misc # Creating an array from image data image_RGB", "three channels image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0] image_GreyScale_with_3_channels[:,", "* 0.587 + image_np[:, :, 2] * 0.114 # Checking", "figure plt.figure('GreyScaled image from RGB') # Here we don't need", "color from skimage import io import scipy.misc # Creating an", "# Here we don't need to specify the map like", "ax0, ax1, ax2, ax3 = ax.flatten() # Adjusting first subplot", "3) - which is tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape +", "image_RGB = io.imread(\"images/eagle.jpg\") image_GreyScale = color.rgb2gray(image_RGB) # Checking the type", "shape of the array print(image_np.shape) # Showing image with every", "the type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> #", ":, 0] # Saving image into a file from obtained", "to convert RGB image into GreyScale image image_RGB = io.imread(\"images/eagle.jpg\")", "10.5281/zenodo.1343603 # Opening RGB image as array, converting to 
GreyScale", "new file # Importing needed libraries import numpy as np", "by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Preparing array", "= io.imread(\"images/eagle.jpg\") image_GreyScale = color.rgb2gray(image_RGB) # Checking the type of", "GitHub platform. DOI: 10.5281/zenodo.1343603 # Opening RGB image as array,", "converted image into a file from processed array scipy.misc.imsave(\"images/result_3.jpg\", image_RGB_as_GreyScale)", "array from image data image_RGB = Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB)", "0], result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2]))", "# Secondly, reshaping GreyScale image from 2D to 3D x", "f.canvas.set_window_title('Eagle image in three channels R, G and B') #", ":, 2] * 0.114 # Checking the type of the", "the window with figure plt.figure('GreyScaled image from RGB') # Showing", "same data in each # Firstly, creating array with zero", "Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB) # Checking the type of the", "# Checking the shape of the array print(image_np.shape) # Showing", "import color from skimage import io import scipy.misc # Creating", "and to make it easier to read we use 'flatten'", "1920, 1)) # Finally, writing all data in three channels", "Saving image into a file from obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\",", "each time ax[0, 0] ax0, ax1, ax2, ax3 = ax.flatten()", "# # MIT License # Copyright (c) 2018 <NAME> #", "import io import scipy.misc # Creating an array from image", "from skimage import io import scipy.misc # Creating an array", "cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting third subplot ax2.imshow(channel_B,", "print(image_GreyScale.shape) # Giving the name to the window with figure", "Checking the type of the array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'>", "= np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:,", "1920, 3) - which is tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape", "file from obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking that", "processed array scipy.misc.imsave(\"images/result_2.jpg\", image_GreyScale) # One more way for converting", "np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:, :,", "with figure plt.figure('GreyScaled image from RGB') # Showing the image", "G + 0.114 B image_RGB = Image.open(\"images/eagle.jpg\") image_np = np.array(image_RGB)", "x[:, :, 0] # Saving image into a file from", "as_gray=True) # Checking the type of the array print(type(image_RGB_as_GreyScale)) #", "# <class 'numpy.ndarray'> # Checking the shape of the array", "with subplots f, ax = plt.subplots(nrows=2, ncols=2) # ax is", "with three channels and they are identical result_1 = Image.open(\"images/result_1.jpg\")", "channels and they are identical result_1 = Image.open(\"images/result_1.jpg\") result_1_np =", "= image_np[:, :, 1] channel_B = image_np[:, :, 2] #", "x[:, :, 0] image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0]", "from skimage import color from skimage import io import scipy.misc", "- which is tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3]))", "to 3D x = image_GreyScale.reshape((1080, 1920, 1)) # 
Finally, writing", "result_1 = Image.open(\"images/result_1.jpg\") result_1_np = np.array(result_1) print(result_1_np.shape) print(np.array_equal(result_1_np[:, :, 0],", "image into GreyScale image image_RGB = io.imread(\"images/eagle.jpg\") image_GreyScale = color.rgb2gray(image_RGB)", "using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving converted image", "2] * 0.114 # Checking the type of the array", "from RGB') # Showing the image by using obtained array", "in Python // GitHub platform. DOI: 10.5281/zenodo.1343603 # Opening RGB", "way to convert RGB image into GreyScale image image_RGB =", "Now the shape will be (1080, 1920, 3) - which", "as np from PIL import Image import matplotlib.pyplot as plt", "0] channel_G = image_np[:, :, 1] channel_B = image_np[:, :,", "the shape will be (1080, 1920, 3) - which is", "channel') # Adjusting second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green", "* 0.299 + image_np[:, :, 1] * 0.587 + image_np[:,", "# Copyright (c) 2018 <NAME> # github.com/sichkar-valentyn # # Reference", "plt.subplots(nrows=2, ncols=2) # ax is (2, 2) np array and", "# Checking the shape of the array print(image_GreyScale.shape) # Giving", "each # Firstly, creating array with zero elements # And", "# <NAME>. Image processing in Python // GitHub platform. DOI:", "they are identical result_1 = Image.open(\"images/result_1.jpg\") result_1_np = np.array(result_1) print(result_1_np.shape)", "# Creating a figure with subplots f, ax = plt.subplots(nrows=2,", "cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting fourth subplot ax3.imshow(image_np)", "Function to make distance between figures plt.tight_layout() # Giving the", "time ax[0, 0] ax0, ax1, ax2, ax3 = ax.flatten() #", "# Opening RGB image as array, converting to GreyScale and", "skimage import io import scipy.misc # Creating an array from", ":, 0] image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0] image_GreyScale_with_3_channels[:,", "from PIL import Image import matplotlib.pyplot as plt from skimage", "np.array(image_RGB) image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:,", "from obtained 3D array scipy.misc.imsave(\"images/result_1.jpg\", image_GreyScale_with_3_channels) # Checking that image", "0] * 0.299 + image_np[:, :, 1] * 0.587 +", "plt.tight_layout() # Giving the name to the window with figure", "# Importing needed libraries import numpy as np from PIL", "separately channel_R = image_np[:, :, 0] channel_G = image_np[:, :,", "between figures plt.tight_layout() # Giving the name to the window", "Giving the name to the window with figure plt.figure('GreyScaled image", "x = image_GreyScale.reshape((1080, 1920, 1)) # Finally, writing all data", "the image by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() #", "to the window with figure plt.figure('GreyScaled image from RGB') #", "subplots f, ax = plt.subplots(nrows=2, ncols=2) # ax is (2," ]
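The long row above holds n-grams of a script the fragments call Converting_RGB_to_GreyScale.py: it opens images/eagle.jpg with PIL, plots the R, G and B channels in subplots, converts to greyscale with the luminance formula Y' = 0.299 R + 0.587 G + 0.114 B, and saves the result via scipy.misc.imsave. A minimal sketch of that conversion follows; scipy.misc.imsave has been removed from current SciPy, so imageio.imwrite is substituted here as an assumption, and the file paths are copied from the fragments.

# Sketch of the greyscale conversion described by the fragments above.
# imageio is an assumed stand-in for the script's scipy.misc.imsave call.
import numpy as np
from PIL import Image
import imageio

image_np = np.array(Image.open("images/eagle.jpg"))      # H x W x 3 RGB array
grey = (image_np[:, :, 0] * 0.299 +
        image_np[:, :, 1] * 0.587 +
        image_np[:, :, 2] * 0.114)                        # H x W luminance array

# Repeat the single channel three times so the result can be written as RGB.
grey_3ch = np.repeat(grey[:, :, np.newaxis], 3, axis=2).astype(np.uint8)
imageio.imwrite("images/result_1.jpg", grey_3ch)

The same row also mentions skimage's color.rgb2gray(image_RGB) and io.imread("images/eagle.jpg", as_gray=True) as one-call alternatives that return the greyscale array directly, scaled to [0, 1].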
[ "+ node.title + \"</br>\" if node.properties: for k, v in", "role_map = {} for role in all_roles: role_map[role] = color_for_role(role,", "v in node.properties.items(): desc = desc + k + \":", "= { 'id': counter, 'shape': 'icon', 'label': node.name, 'type': node.node_type,", "all_roles=all_roles) with open(output_name, \"w+\") as resource_file: resource_file.write(html) def get_description(node): desc", "node.properties.items(): desc = desc + k + \": \" +", "% tuple(int(c) * 255 for c in colorsys.hsv_to_rgb(hue, 1, 0.85))", "} description = get_description(node) if description: value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\",", "{ 'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows': 'to', } if edge.label:", "def format_graph(nodes, edges, role_color_map): nodes_list = [] node_ids = {}", "5 Free', 'code': node.get_font_code(), 'size': node.get_size(), 'color': node.get_color(), 'weight': 'bold'", "= sanitise_role(edge.role) if edge.role else 'other' value['color'] = role_color_map[value['role']] edges_list.append(json.dumps(value))", "'Font Awesome 5 Free', 'code': node.get_font_code(), 'size': node.get_size(), 'color': node.get_color(),", "format_graph(nodes, edges, color_map) create_html(formatted_nodes, formatted_edges, color_map, output_name) def color_for_role(role, all_roles):", "edge.role else 'other' value['color'] = role_color_map[value['role']] edges_list.append(json.dumps(value)) return nodes_list, edges_list", "'') \\ .lower() \\ .replace('writer', 'editor') \\ .replace('reader', 'viewer') def", "template = env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html =", "def render(nodes, edges, output_name): color_map = roles_to_color_map(edges=edges) formatted_nodes, formatted_edges =", "resource_file.write(html) def get_description(node): desc = node.get_type_name() + \"</br>\" if node.title:", "{} for counter, node in enumerate(nodes): node_ids[node.id] = counter value", "edges_list = [] for edge in edges: value = {", "'arrows': 'to', } if edge.label: value['label'] = edge.label if edge.title:", "255 for c in colorsys.hsv_to_rgb(hue, 1, 0.85)) def sanitise_role(role): return", "in node.properties.items(): desc = desc + k + \": \"", "str(v) + \"</br>\" return desc def render(nodes, edges, output_name): color_map", "[] node_ids = {} for counter, node in enumerate(nodes): node_ids[node.id]", "desc def render(nodes, edges, output_name): color_map = roles_to_color_map(edges=edges) formatted_nodes, formatted_edges", "return desc def render(nodes, edges, output_name): color_map = roles_to_color_map(edges=edges) formatted_nodes,", "'label': node.name, 'type': node.node_type, 'icon': { 'face': 'Font Awesome 5", "color_for_role(role, all_roles) role_map['other'] = '#00c0ff' return role_map def format_graph(nodes, edges,", "value['title'] = edge.title value['role'] = sanitise_role(edge.role) if edge.role else 'other'", "import json from jinja2 import Environment, PackageLoader import graph def", "Environment, PackageLoader import graph def create_html(formatted_nodes, formatted_edges, role_color_map, output_name): env", "{ 'face': 'Font Awesome 5 Free', 'code': node.get_font_code(), 'size': node.get_size(),", "roles_to_color_map(edges=edges) formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map) create_html(formatted_nodes, formatted_edges, color_map,", "\"w+\") as resource_file: resource_file.write(html) 
def get_description(node): desc = node.get_type_name() +", "output_name) def color_for_role(role, all_roles): hue = float(all_roles.index(role)) / len(all_roles) return", "\\ .lower() \\ .replace('writer', 'editor') \\ .replace('reader', 'viewer') def roles_to_color_map(edges):", "node.title: desc = desc + node.title + \"</br>\" if node.properties:", "in all_roles: role_map[role] = color_for_role(role, all_roles) role_map['other'] = '#00c0ff' return", "'color': node.get_color(), 'weight': 'bold' } } description = get_description(node) if", "role_map['other'] = '#00c0ff' return role_map def format_graph(nodes, edges, role_color_map): nodes_list", "value['label'] = edge.label if edge.title: value['title'] = edge.title value['role'] =", "colorsys.hsv_to_rgb(hue, 1, 0.85)) def sanitise_role(role): return str(role).replace('roles/', '') \\ .lower()", "edge in edges: value = { 'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id],", "= edge.title value['role'] = sanitise_role(edge.role) if edge.role else 'other' value['color']", "formatted_edges, color_map, output_name) def color_for_role(role, all_roles): hue = float(all_roles.index(role)) /", "for e in edges if e.role}) role_map = {} for", "'#00c0ff' return role_map def format_graph(nodes, edges, role_color_map): nodes_list = []", "sanitise_role(edge.role) if edge.role else 'other' value['color'] = role_color_map[value['role']] edges_list.append(json.dumps(value)) return", "roles_to_color_map(edges): all_roles = list({sanitise_role(e.role) for e in edges if e.role})", "'icon', 'label': node.name, 'type': node.node_type, 'icon': { 'face': 'Font Awesome", "value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list = [] for edge", "if edge.label: value['label'] = edge.label if edge.title: value['title'] = edge.title", "in enumerate(nodes): node_ids[node.id] = counter value = { 'id': counter,", "color_for_role(role, all_roles): hue = float(all_roles.index(role)) / len(all_roles) return '#%02x%02x%02x' %", "template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with open(output_name, \"w+\") as resource_file:", "= template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with open(output_name, \"w+\") as", "'bold' } } description = get_description(node) if description: value['title'] =", "description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list = [] for edge in edges:", "all_roles: role_map[role] = color_for_role(role, all_roles) role_map['other'] = '#00c0ff' return role_map", "role_map def format_graph(nodes, edges, role_color_map): nodes_list = [] node_ids =", "if edge.role else 'other' value['color'] = role_color_map[value['role']] edges_list.append(json.dumps(value)) return nodes_list,", "node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows': 'to', } if edge.label: value['label'] =", "default_filters=default_filters, all_roles=all_roles) with open(output_name, \"w+\") as resource_file: resource_file.write(html) def get_description(node):", "\" + str(v) + \"</br>\" return desc def render(nodes, edges,", "in edges if e.role}) role_map = {} for role in", "= color_for_role(role, all_roles) role_map['other'] = '#00c0ff' return role_map def format_graph(nodes,", "def roles_to_color_map(edges): all_roles 
= list({sanitise_role(e.role) for e in edges if", "= format_graph(nodes, edges, color_map) create_html(formatted_nodes, formatted_edges, color_map, output_name) def color_for_role(role,", "def color_for_role(role, all_roles): hue = float(all_roles.index(role)) / len(all_roles) return '#%02x%02x%02x'", "= desc + k + \": \" + str(v) +", "jinja2 import Environment, PackageLoader import graph def create_html(formatted_nodes, formatted_edges, role_color_map,", "desc + node.title + \"</br>\" if node.properties: for k, v", "formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map) create_html(formatted_nodes, formatted_edges, color_map, output_name)", "} } description = get_description(node) if description: value['title'] = description", "= desc + node.title + \"</br>\" if node.properties: for k,", "return str(role).replace('roles/', '') \\ .lower() \\ .replace('writer', 'editor') \\ .replace('reader',", "get_description(node) if description: value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list =", "len(all_roles) return '#%02x%02x%02x' % tuple(int(c) * 255 for c in", "edges if e.role}) role_map = {} for role in all_roles:", "create_html(formatted_nodes, formatted_edges, role_color_map, output_name): env = Environment(loader=PackageLoader('visualisation', '.')) template =", "in colorsys.hsv_to_rgb(hue, 1, 0.85)) def sanitise_role(role): return str(role).replace('roles/', '') \\", "def get_description(node): desc = node.get_type_name() + \"</br>\" if node.title: desc", "= env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html = template.render(formatted_nodes=formatted_nodes,", "node.get_type_name() + \"</br>\" if node.title: desc = desc + node.title", "edges, output_name): color_map = roles_to_color_map(edges=edges) formatted_nodes, formatted_edges = format_graph(nodes, edges,", "Awesome 5 Free', 'code': node.get_font_code(), 'size': node.get_size(), 'color': node.get_color(), 'weight':", "node.get_size(), 'color': node.get_color(), 'weight': 'bold' } } description = get_description(node)", "= get_description(node) if description: value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list", "= description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list = [] for edge in", "edge.title value['role'] = sanitise_role(edge.role) if edge.role else 'other' value['color'] =", "0.85)) def sanitise_role(role): return str(role).replace('roles/', '') \\ .lower() \\ .replace('writer',", "counter value = { 'id': counter, 'shape': 'icon', 'label': node.name,", "node.title + \"</br>\" if node.properties: for k, v in node.properties.items():", "= [] for edge in edges: value = { 'from':", ".replace('reader', 'viewer') def roles_to_color_map(edges): all_roles = list({sanitise_role(e.role) for e in", "+ \"</br>\" return desc def render(nodes, edges, output_name): color_map =", "type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with open(output_name, \"w+\") as resource_file: resource_file.write(html) def", "def sanitise_role(role): return str(role).replace('roles/', '') \\ .lower() \\ .replace('writer', 'editor')", "nodes_list = [] node_ids = {} for counter, node in", "output_name): color_map = roles_to_color_map(edges=edges) formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map)", "value['role'] = 
sanitise_role(edge.role) if edge.role else 'other' value['color'] = role_color_map[value['role']]", "e in edges if e.role}) role_map = {} for role", "render(nodes, edges, output_name): color_map = roles_to_color_map(edges=edges) formatted_nodes, formatted_edges = format_graph(nodes,", "\\ .replace('writer', 'editor') \\ .replace('reader', 'viewer') def roles_to_color_map(edges): all_roles =", "'shape': 'icon', 'label': node.name, 'type': node.node_type, 'icon': { 'face': 'Font", "role_map[role] = color_for_role(role, all_roles) role_map['other'] = '#00c0ff' return role_map def", "node_ids[edge.node_to.id], 'arrows': 'to', } if edge.label: value['label'] = edge.label if", "PackageLoader import graph def create_html(formatted_nodes, formatted_edges, role_color_map, output_name): env =", "'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows': 'to', } if edge.label: value['label']", "hue = float(all_roles.index(role)) / len(all_roles) return '#%02x%02x%02x' % tuple(int(c) *", "in edges: value = { 'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows':", "= edge.label if edge.title: value['title'] = edge.title value['role'] = sanitise_role(edge.role)", "node.get_color(), 'weight': 'bold' } } description = get_description(node) if description:", "\\ .replace('reader', 'viewer') def roles_to_color_map(edges): all_roles = list({sanitise_role(e.role) for e", "[] for edge in edges: value = { 'from': node_ids[edge.node_from.id],", "import Environment, PackageLoader import graph def create_html(formatted_nodes, formatted_edges, role_color_map, output_name):", "desc + k + \": \" + str(v) + \"</br>\"", "all_roles = list({sanitise_role(e.role) for e in edges if e.role}) role_map", "'#%02x%02x%02x' % tuple(int(c) * 255 for c in colorsys.hsv_to_rgb(hue, 1,", "node in enumerate(nodes): node_ids[node.id] = counter value = { 'id':", "as resource_file: resource_file.write(html) def get_description(node): desc = node.get_type_name() + \"</br>\"", "= roles_to_color_map(edges=edges) formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map) create_html(formatted_nodes, formatted_edges,", "float(all_roles.index(role)) / len(all_roles) return '#%02x%02x%02x' % tuple(int(c) * 255 for", "\"</br>\" if node.title: desc = desc + node.title + \"</br>\"", "if node.properties: for k, v in node.properties.items(): desc = desc", "\": \" + str(v) + \"</br>\" return desc def render(nodes,", "return role_map def format_graph(nodes, edges, role_color_map): nodes_list = [] node_ids", "edge.title: value['title'] = edge.title value['role'] = sanitise_role(edge.role) if edge.role else", "json from jinja2 import Environment, PackageLoader import graph def create_html(formatted_nodes,", "+ k + \": \" + str(v) + \"</br>\" return", "resource_file: resource_file.write(html) def get_description(node): desc = node.get_type_name() + \"</br>\" if", "desc = desc + k + \": \" + str(v)", "1, 0.85)) def sanitise_role(role): return str(role).replace('roles/', '') \\ .lower() \\", "all_roles=list(role_color_map.keys()) print(all_roles) html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with", "format_graph(nodes, edges, role_color_map): nodes_list = [] node_ids = {} for", "k, v in node.properties.items(): desc = desc + k +", "for c in colorsys.hsv_to_rgb(hue, 1, 0.85)) def sanitise_role(role): return str(role).replace('roles/',", "c in 
colorsys.hsv_to_rgb(hue, 1, 0.85)) def sanitise_role(role): return str(role).replace('roles/', '')", "= Environment(loader=PackageLoader('visualisation', '.')) template = env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys())", "role_color_map, output_name): env = Environment(loader=PackageLoader('visualisation', '.')) template = env.get_template('visualisation.template') default_filters", "'editor') \\ .replace('reader', 'viewer') def roles_to_color_map(edges): all_roles = list({sanitise_role(e.role) for", "\"</br>\" return desc def render(nodes, edges, output_name): color_map = roles_to_color_map(edges=edges)", "\"</br>\" if node.properties: for k, v in node.properties.items(): desc =", "\"\\\\\")) edges_list = [] for edge in edges: value =", "html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with open(output_name, \"w+\")", "= list({sanitise_role(e.role) for e in edges if e.role}) role_map =", "for role in all_roles: role_map[role] = color_for_role(role, all_roles) role_map['other'] =", "{} for role in all_roles: role_map[role] = color_for_role(role, all_roles) role_map['other']", "description = get_description(node) if description: value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\"))", "'type': node.node_type, 'icon': { 'face': 'Font Awesome 5 Free', 'code':", "= [] node_ids = {} for counter, node in enumerate(nodes):", "list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles)", "get_description(node): desc = node.get_type_name() + \"</br>\" if node.title: desc =", "= counter value = { 'id': counter, 'shape': 'icon', 'label':", "= node.get_type_name() + \"</br>\" if node.title: desc = desc +", "all_roles): hue = float(all_roles.index(role)) / len(all_roles) return '#%02x%02x%02x' % tuple(int(c)", "} if edge.label: value['label'] = edge.label if edge.title: value['title'] =", "formatted_edges = format_graph(nodes, edges, color_map) create_html(formatted_nodes, formatted_edges, color_map, output_name) def", "formatted_edges, role_color_map, output_name): env = Environment(loader=PackageLoader('visualisation', '.')) template = env.get_template('visualisation.template')", "<filename>template_renderer.py<gh_stars>0 import colorsys import json from jinja2 import Environment, PackageLoader", "env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges,", ".lower() \\ .replace('writer', 'editor') \\ .replace('reader', 'viewer') def roles_to_color_map(edges): all_roles", "'to', } if edge.label: value['label'] = edge.label if edge.title: value['title']", "edge.label: value['label'] = edge.label if edge.title: value['title'] = edge.title value['role']", "= list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters,", "with open(output_name, \"w+\") as resource_file: 
resource_file.write(html) def get_description(node): desc =", "formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with open(output_name, \"w+\") as resource_file: resource_file.write(html)", "node.properties: for k, v in node.properties.items(): desc = desc +", "role_color_map): nodes_list = [] node_ids = {} for counter, node", "if e.role}) role_map = {} for role in all_roles: role_map[role]", "edges: value = { 'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows': 'to',", "edges, role_color_map): nodes_list = [] node_ids = {} for counter,", "value = { 'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows': 'to', }", "all_roles) role_map['other'] = '#00c0ff' return role_map def format_graph(nodes, edges, role_color_map):", "for k, v in node.properties.items(): desc = desc + k", "counter, node in enumerate(nodes): node_ids[node.id] = counter value = {", "= {} for role in all_roles: role_map[role] = color_for_role(role, all_roles)", "import colorsys import json from jinja2 import Environment, PackageLoader import", "* 255 for c in colorsys.hsv_to_rgb(hue, 1, 0.85)) def sanitise_role(role):", "print(all_roles) html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties, default_filters=default_filters, all_roles=all_roles) with open(output_name,", "return '#%02x%02x%02x' % tuple(int(c) * 255 for c in colorsys.hsv_to_rgb(hue,", "+ str(v) + \"</br>\" return desc def render(nodes, edges, output_name):", "for counter, node in enumerate(nodes): node_ids[node.id] = counter value =", "value = { 'id': counter, 'shape': 'icon', 'label': node.name, 'type':", "desc = desc + node.title + \"</br>\" if node.properties: for", "open(output_name, \"w+\") as resource_file: resource_file.write(html) def get_description(node): desc = node.get_type_name()", "'face': 'Font Awesome 5 Free', 'code': node.get_font_code(), 'size': node.get_size(), 'color':", "node.get_font_code(), 'size': node.get_size(), 'color': node.get_color(), 'weight': 'bold' } } description", "node.name, 'type': node.node_type, 'icon': { 'face': 'Font Awesome 5 Free',", "if node.title: desc = desc + node.title + \"</br>\" if", "default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html = template.render(formatted_nodes=formatted_nodes, formatted_edges=formatted_edges, type_properties=graph.type_properties,", "Environment(loader=PackageLoader('visualisation', '.')) template = env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles)", "description: value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list = [] for", "'.')) template = env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys()) all_roles=list(role_color_map.keys()) print(all_roles) html", "colorsys import json from jinja2 import Environment, PackageLoader import graph", "+ \"</br>\" if node.title: desc = desc + node.title +", "tuple(int(c) * 255 for c in colorsys.hsv_to_rgb(hue, 1, 0.85)) def", "list({sanitise_role(e.role) for e in edges if e.role}) role_map = {}", "output_name): env = Environment(loader=PackageLoader('visualisation', '.')) template = env.get_template('visualisation.template') default_filters =", "role in all_roles: role_map[role] = 
color_for_role(role, all_roles) role_map['other'] = '#00c0ff'", "+ \": \" + str(v) + \"</br>\" return desc def", "counter, 'shape': 'icon', 'label': node.name, 'type': node.node_type, 'icon': { 'face':", "'weight': 'bold' } } description = get_description(node) if description: value['title']", "color_map, output_name) def color_for_role(role, all_roles): hue = float(all_roles.index(role)) / len(all_roles)", "+ \"</br>\" if node.properties: for k, v in node.properties.items(): desc", "edge.label if edge.title: value['title'] = edge.title value['role'] = sanitise_role(edge.role) if", "'viewer') def roles_to_color_map(edges): all_roles = list({sanitise_role(e.role) for e in edges", "enumerate(nodes): node_ids[node.id] = counter value = { 'id': counter, 'shape':", "{ 'id': counter, 'shape': 'icon', 'label': node.name, 'type': node.node_type, 'icon':", "/ len(all_roles) return '#%02x%02x%02x' % tuple(int(c) * 255 for c", "env = Environment(loader=PackageLoader('visualisation', '.')) template = env.get_template('visualisation.template') default_filters = list(graph.type_properties.keys())", "str(role).replace('roles/', '') \\ .lower() \\ .replace('writer', 'editor') \\ .replace('reader', 'viewer')", "for edge in edges: value = { 'from': node_ids[edge.node_from.id], 'to':", "'code': node.get_font_code(), 'size': node.get_size(), 'color': node.get_color(), 'weight': 'bold' } }", "node.node_type, 'icon': { 'face': 'Font Awesome 5 Free', 'code': node.get_font_code(),", "if description: value['title'] = description nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list = []", "node_ids = {} for counter, node in enumerate(nodes): node_ids[node.id] =", "nodes_list.append(json.dumps(value).replace(\"\\\\\\\\\", \"\\\\\")) edges_list = [] for edge in edges: value", "graph def create_html(formatted_nodes, formatted_edges, role_color_map, output_name): env = Environment(loader=PackageLoader('visualisation', '.'))", "e.role}) role_map = {} for role in all_roles: role_map[role] =", "node_ids[node.id] = counter value = { 'id': counter, 'shape': 'icon',", "'id': counter, 'shape': 'icon', 'label': node.name, 'type': node.node_type, 'icon': {", "k + \": \" + str(v) + \"</br>\" return desc", ".replace('writer', 'editor') \\ .replace('reader', 'viewer') def roles_to_color_map(edges): all_roles = list({sanitise_role(e.role)", "Free', 'code': node.get_font_code(), 'size': node.get_size(), 'color': node.get_color(), 'weight': 'bold' }", "color_map = roles_to_color_map(edges=edges) formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map) create_html(formatted_nodes,", "import graph def create_html(formatted_nodes, formatted_edges, role_color_map, output_name): env = Environment(loader=PackageLoader('visualisation',", "'icon': { 'face': 'Font Awesome 5 Free', 'code': node.get_font_code(), 'size':", "= '#00c0ff' return role_map def format_graph(nodes, edges, role_color_map): nodes_list =", "if edge.title: value['title'] = edge.title value['role'] = sanitise_role(edge.role) if edge.role", "edges, color_map) create_html(formatted_nodes, formatted_edges, color_map, output_name) def color_for_role(role, all_roles): hue", "color_map) create_html(formatted_nodes, formatted_edges, color_map, output_name) def color_for_role(role, all_roles): hue =", "= float(all_roles.index(role)) / len(all_roles) return '#%02x%02x%02x' % tuple(int(c) * 255", "create_html(formatted_nodes, formatted_edges, color_map, output_name) def color_for_role(role, all_roles): hue = float(all_roles.index(role))", 
"desc = node.get_type_name() + \"</br>\" if node.title: desc = desc", "def create_html(formatted_nodes, formatted_edges, role_color_map, output_name): env = Environment(loader=PackageLoader('visualisation', '.')) template", "= {} for counter, node in enumerate(nodes): node_ids[node.id] = counter", "'size': node.get_size(), 'color': node.get_color(), 'weight': 'bold' } } description =", "'to': node_ids[edge.node_to.id], 'arrows': 'to', } if edge.label: value['label'] = edge.label", "sanitise_role(role): return str(role).replace('roles/', '') \\ .lower() \\ .replace('writer', 'editor') \\", "= { 'from': node_ids[edge.node_from.id], 'to': node_ids[edge.node_to.id], 'arrows': 'to', } if", "from jinja2 import Environment, PackageLoader import graph def create_html(formatted_nodes, formatted_edges," ]
[ "PullerActor(name, db, self.report_filter, model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def __init__(self):", "name, db, model, stream_mode, level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator): def", "import HWPCModel, PowerModel, FormulaModel, ControlModel from powerapi.database import MongoDB, CsvDB,", "# and/or other materials provided with the distribution. # #", "subparser_csv_output.add_argument('m', 'model', help='specify data type that will be storen in", "help='specify data type that will be storen in the database',", "permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "database name', ) subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_input.add_argument('n',", "help='specify input csv files with this format : file1,file2,file3', action=extract_file_names,", "must reproduce the above copyright notice, # this list of", "BadValueException, MissingValueException from powerapi.cli.parser import BadTypeException, BadContextException from powerapi.cli.parser import", "self.component_group_name not in config: print('CLI error : no ' +", "above copyright notice, # this list of conditions and the", "lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']), } def add_model_factory(self, model_name, model_factory):", "from powerapi.puller import PullerActor from powerapi.pusher import PusherActor def enable_log(arg,", "# # Redistribution and use in source and binary forms,", "return reduce(lambda acc, f: acc and os.access(f, os.R_OK), files.split(','), True)", "self.add_component_subparser('output', subparser_csv_output, help_str='specify a database input : --db_output database_name ARG1", "import store_true from powerapi.cli.parser import BadValueException, MissingValueException from powerapi.cli.parser import", "a name already bound to another database factory in the", "exn: msg = 'CLI error : unknow argument ' +", "with the distribution. # # * Neither the name of", "raise NotImplementedError() class PullerGenerator(DBActorGenerator): def __init__(self, report_filter): DBActorGenerator.__init__(self, 'input') self.report_filter", "subparser_mongo_output.add_argument('u', 'uri', help='sepcify MongoDB uri') subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database", "database output : --db_output database_name ARG1 ARG2 ...') subparser_csv_output =", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE", "database input : --db_output database_name ARG1 ARG2 ... 
') subparser_influx_output", "subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_input.add_argument('n', 'name', help='specify puller", "model factory with a name already bound to another model", "= self.model_factory[db_config['model']] name = db_config['name'] return self._actor_factory(name, db, model, main_config['stream'],", "ARG1 ARG2 ...') subparser_csv_output = ComponentSubParser('csv') subparser_csv_output.add_argument('d', 'directory', help='specify directory", "exn.argument_name print(msg, file=sys.stderr) except BadContextException as exn: msg = 'CLI", "print(msg, file=sys.stderr) sys.exit() return actors def _gen_actor(self, component_name, component_config, main_config):", "database', default='HWPCReport') subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv') self.add_component_subparser('input', subparser_csv_input,", "help='sepcify openTSDB host') subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int)", ": argument ' + exn.argument_name + ' : expect '", "as exn: msg = 'CLI error : unknow argument '", "return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "model, stream_mode, level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator): def __init__(self, report_filter):", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT", "GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS;", "subparser_csv_input.add_argument('f', 'files', help='specify input csv files with this format :", "model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self, 'output') def", "main_config): return self.db_factory[db_name](db_config) def _gen_actor(self, db_name, db_config, main_config): db =", "report_filter def _actor_factory(self, name, db, model, stream_mode, level_logger): return PullerActor(name,", "from powerapi.pusher import PusherActor def enable_log(arg, val, args, acc): acc[arg]", "'HWPCReport': HWPCModel(), 'PowerReport': PowerModel(), 'FormulaReport': FormulaModel(), 'ControlReport': ControlModel(), } self.db_factory", "a database output : --db_output database_name ARG1 ARG2 ...') subparser_csv_output", "2018, INRIA # Copyright (c) 2018, University of Lille #", "+ exn.argument_name + ' : ' + exn.msg print(msg, file=sys.stderr)", "from # this software without specific prior written permission. #", "A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL", "component_group_name def generate(self, config): if self.component_group_name not in config: print('CLI", "db_name in self.model_factory: raise ModelNameAlreadyUsed() self.model_factory[db_name] = db_factory def _generate_db(self,", "db, self.report_filter, model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self,", "msg = 'CLI error : argument ' + exn.argument_name +", "self.add_component_subparser('input', subparser_mongo_input, help_str='specify a database input : --db_output database_name ARG1", "be storen in the database', default='PowerReport') subparser_mongo_output.add_argument('n', 'name', help='specify puller", "notice, this # list of conditions and the following disclaimer.", "os.access(f, os.R_OK), files.split(','), True) def extract_file_names(arg, val, args, acc): acc[arg]", "MongoDB database name', ) subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection')", "db_config, main_config) model = self.model_factory[db_config['model']] name = db_config['name'] return self._actor_factory(name,", "subparser_csv_output = ComponentSubParser('csv') subparser_csv_output.add_argument('d', 'directory', help='specify directory where where output", "class ModelNameAlreadyUsed(PowerAPIException): \"\"\" Exception raised when attempting to add to", "when attempting to add to a DBActorGenerator a model factory", "def _actor_factory(self, name, db, model, stream_mode, level_logger): raise NotImplementedError() class", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "flag=True, action=enable_log, default=logging.NOTSET, help='enable verbose mode') self.add_argument('s', 'stream', flag=True, action=store_true,", "prior written permission. # THIS SOFTWARE IS PROVIDED BY THE", "endorse or promote products derived from # this software without", "copyright notice, this # list of conditions and the following", "input : --db_output database_name ARG1 ARG2 ... ') subparser_mongo_output =", "in the database', default='PowerReport') subparser_mongo_output.add_argument('n', 'name', help='specify puller name', default='pusher_mongodb')", "self.parse(sys.argv[1:]) except BadValueException as exn: msg = 'CLI error :", "in exn.context_list: msg += '\\n --' + main_arg_name + '", "openTSDB host') subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int) subparser_opentsdb_output.add_argument('metric_name',", "args, acc def check_csv_files(files): return reduce(lambda acc, f: acc and", "puller name', default='puller_csv') self.add_component_subparser('input', subparser_csv_input, help_str='specify a database input :", "SUCH DAMAGE. import os import sys import logging from functools", "import PullerActor from powerapi.pusher import PusherActor def enable_log(arg, val, args,", "correct context\\nUse it with the following arguments :' for main_arg_name,", "source and binary forms, with or without # modification, are", "component_group_name): Generator.__init__(self, component_group_name) self.model_factory = { 'HWPCReport': HWPCModel(), 'PowerReport': PowerModel(),", "this software without specific prior written permission. 
# THIS SOFTWARE", "--db_output database_name ARG1 ARG2 ...') subparser_csv_output = ComponentSubParser('csv') subparser_csv_output.add_argument('d', 'directory',", "file=sys.stderr) sys.exit() actors = {} for component_type, components_list in config[self.component_group_name].items():", "+ main_arg_name + ' ' + context_name print(msg, file=sys.stderr) sys.exit()", "ModelNameAlreadyUsed(PowerAPIException): \"\"\" Exception raised when attempting to add to a", "PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE", "exn.argument_name + ' : expect a value' print(msg, file=sys.stderr) except", "ANY WAY OUT OF THE USE # OF THIS SOFTWARE,", ": --db_output database_name ARG1 ARG2 ... ') subparser_csv_input = ComponentSubParser('csv')", "except UnknowArgException as exn: msg = 'CLI error : unknow", "' + context_name print(msg, file=sys.stderr) sys.exit() class Generator: def __init__(self,", "subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int) subparser_opentsdb_output.add_argument('metric_name', help='specify metric", "name', default='pusher_mongodb') self.add_component_subparser('output', subparser_mongo_output, help_str='specify a database output : --db_output", "exn.argument_name + ' : ' + exn.msg print(msg, file=sys.stderr) except", "MongoDB database collection') subparser_mongo_output.add_argument('m', 'model', help='specify data type that will", "def _gen_actor(self, component_name, component_config, main_config): raise NotImplementedError() class ModelNameAlreadyUsed(PowerAPIException): \"\"\"", "name', ) subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_input.add_argument('n', 'name',", "connection port', type=int) subparser_opentsdb_output.add_argument('metric_name', help='specify metric name') subparser_opentsdb_output.add_argument('m', 'model', help='specify", "_actor_factory(self, name, db, model, stream_mode, level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator):", "PusherActor def enable_log(arg, val, args, acc): acc[arg] = logging.DEBUG return", "stream_mode, level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator): def __init__(self, report_filter): DBActorGenerator.__init__(self,", "puller name', default='pusher_opentsdb') self.add_component_subparser('output', subparser_opentsdb_output, help_str='specify a database input :", "' msg += exn.article + ' ' + exn.type_name print(msg,", "BadTypeException as exn: msg = 'CLI error : argument '", "level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator): def __init__(self, report_filter): DBActorGenerator.__init__(self, 'input')", "subparser_csv_input = ComponentSubParser('csv') subparser_csv_input.add_argument('f', 'files', help='specify input csv files with", "args, acc class CommonCLIParser(MainParser): def __init__(self): MainParser.__init__(self) self.add_argument('v', 'verbose', flag=True,", "= ComponentSubParser('mongodb') subparser_mongo_input.add_argument('u', 'uri', help='sepcify MongoDB uri') subparser_mongo_input.add_argument('d', 'db', help='specify", "connection port', type=int) subparser_influx_output.add_argument('m', 'model', help='specify data type that will", "data type that will be storen in the database', default='PowerReport')", "of source code must retain the above copyright notice, this", "acc[arg] = logging.DEBUG return args, acc def check_csv_files(files): return reduce(lambda", "SUBSTITUTE GOODS OR # SERVICES; 
LOSS OF USE, DATA, OR", "factory with a name already bound to another model factory", "HWPCModel(), 'PowerReport': PowerModel(), 'FormulaReport': FormulaModel(), 'ControlReport': ControlModel(), } self.db_factory =", "database name') subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int) subparser_influx_output.add_argument('m',", "database collection') subparser_mongo_output.add_argument('m', 'model', help='specify data type that will be", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED.", "collection') subparser_mongo_input.add_argument('n', 'name', help='specify puller name', default='puller_mongodb') subparser_mongo_input.add_argument('m', 'model', help='specify", "+ component_type print(msg, file=sys.stderr) sys.exit() return actors def _gen_actor(self, component_name,", "DBActorGenerator.__init__(self, 'input') self.report_filter = report_filter def _actor_factory(self, name, db, model,", "DBActorGenerator a model factory with a name already bound to", "OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED", "ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #", "' : expect ' msg += exn.article + ' '", "Copyright (c) 2018, INRIA # Copyright (c) 2018, University of", "a database factory with a name already bound to another", "__init__(self, component_group_name): Generator.__init__(self, component_group_name) self.model_factory = { 'HWPCReport': HWPCModel(), 'PowerReport':", "type=int) subparser_influx_output.add_argument('m', 'model', help='specify data type that will be storen", "'directory', help='specify directory where where output csv files will be", ": --db_output database_name ARG1 ARG2 ... ') subparser_influx_output = ComponentSubParser('influxdb')", "be writen') subparser_csv_output.add_argument('m', 'model', help='specify data type that will be", "subparser_csv_input.add_argument('m', 'model', help='specify data type that will be storen in", "and binary forms, with or without # modification, are permitted", "level_logger) class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self, 'output') def _actor_factory(self, name,", "names of its # contributors may be used to endorse", "argument ' + exn.argument_name msg += ' not used in", "in config[self.component_group_name].items(): for component_name, component_config in components_list.items(): try: actors[component_name] =", "MongoDB uri') subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database name') subparser_mongo_output.add_argument('c', 'collection',", "name, db, model, stream_mode, level_logger): return PullerActor(name, db, self.report_filter, model,", "ComponentSubParser('csv') subparser_csv_input.add_argument('f', 'files', help='specify input csv files with this format", "# * Neither the name of the copyright holder nor", "USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self, 'output') def _actor_factory(self, name, db,", "db_config: CsvDB(current_path=os.getcwd() if 'directory' not in db_config else db_config['directory'], files=[]", "= ComponentSubParser('mongodb') subparser_mongo_output.add_argument('u', 'uri', help='sepcify MongoDB uri') subparser_mongo_output.add_argument('d', 'db', help='specify", "BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "already bound to another model factory in the DBActorGenerator 
\"\"\"", "source code must retain the above copyright notice, this #", "exn.argument_name msg += ' not used in the correct context\\nUse", "be storen in the database', default='HWPCReport') subparser_csv_input.add_argument('n', 'name', help='specify puller", "subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int) subparser_influx_output.add_argument('m', 'model', help='specify", "subparser_opentsdb_output.add_argument('u', 'uri', help='sepcify openTSDB host') subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection", "storen in the database', default='PowerReport') subparser_csv_output.add_argument('n', 'name', help='specify puller name',", "not used in the correct context\\nUse it with the following", "no ' + self.component_group_name + ' specified', file=sys.stderr) sys.exit() actors", "database input : --db_output database_name ARG1 ARG2 ... ') subparser_mongo_output", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "'collection', help='specify MongoDB database collection') subparser_mongo_output.add_argument('m', 'model', help='specify data type", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "+ exn.msg print(msg, file=sys.stderr) except MissingValueException as exn: msg =", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "not in config: print('CLI error : no ' + self.component_group_name", "= 'CLI error : argument ' + exn.args[0] msg +=", "print(msg, file=sys.stderr) except BadContextException as exn: msg = 'CLI error", "default='HWPCReport') subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv') self.add_component_subparser('input', subparser_csv_input, help_str='specify", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "the database', default='HWPCReport') self.add_component_subparser('input', subparser_mongo_input, help_str='specify a database input :", "WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE", "specific prior written permission. # THIS SOFTWARE IS PROVIDED BY", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import", "return self.parse(sys.argv[1:]) except BadValueException as exn: msg = 'CLI error", "args, acc): acc[arg] = val.split(',') return args, acc class CommonCLIParser(MainParser):", "University of Lille # All rights reserved. # # Redistribution", "db_name, db_config, main_config): return self.db_factory[db_name](db_config) def _gen_actor(self, db_name, db_config, main_config):", "notice, # this list of conditions and the following disclaimer", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES;", "as exn: msg = 'CLI error : argument ' +", "def _actor_factory(self, name, db, model, stream_mode, level_logger): return PusherActor(name, model,", "self._gen_actor(component_type, component_config, config) except KeyError as exn: msg = 'CLI", "other materials provided with the distribution. 
# # * Neither", "powerapi.puller import PullerActor from powerapi.pusher import PusherActor def enable_log(arg, val,", "that will be storen in the database', default='PowerReport') subparser_mongo_output.add_argument('n', 'name',", "db_config['metric_name']), } def add_model_factory(self, model_name, model_factory): if model_name in self.model_factory:", "subparser_csv_output.add_argument('d', 'directory', help='specify directory where where output csv files will", "help='specify puller name', default='pusher_csv') self.add_component_subparser('output', subparser_csv_output, help_str='specify a database input", "database input : --db_output database_name ARG1 ARG2 ... ') subparser_csv_input", "in the database', default='PowerReport') subparser_influx_output.add_argument('n', 'name', help='specify puller name', default='pusher_influxdb')", "be storen in the database', default='HWPCReport') self.add_component_subparser('input', subparser_mongo_input, help_str='specify a", "retain the above copyright notice, this # list of conditions", "ARG2 ... ') subparser_mongo_output = ComponentSubParser('mongodb') subparser_mongo_output.add_argument('u', 'uri', help='sepcify MongoDB", "subparser_influx_output.add_argument('m', 'model', help='specify data type that will be storen in", "= { 'mongodb': lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']), 'csv': lambda", "OUT OF THE USE # OF THIS SOFTWARE, EVEN IF", "action=store_true, default=False, help='enable stream mode') subparser_mongo_input = ComponentSubParser('mongodb') subparser_mongo_input.add_argument('u', 'uri',", "BadValueException as exn: msg = 'CLI error : argument '", "* Redistributions in binary form must reproduce the above copyright", "acc def check_csv_files(files): return reduce(lambda acc, f: acc and os.access(f,", "software without specific prior written permission. 
# THIS SOFTWARE IS", "subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name') subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB", "PowerModel, FormulaModel, ControlModel from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB", ": no ' + self.component_group_name + ' specified', file=sys.stderr) sys.exit()", "component_config, config) except KeyError as exn: msg = 'CLI error", "OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY", "from powerapi.exception import PowerAPIException from powerapi.cli.parser import MainParser, ComponentSubParser from", "name', default='puller_csv') self.add_component_subparser('input', subparser_csv_input, help_str='specify a database input : --db_output", "a value' print(msg, file=sys.stderr) except BadTypeException as exn: msg =", "argument ' + exn.args[0] msg += ' needed with --output", "in self.model_factory: raise ModelNameAlreadyUsed() self.model_factory[model_name] = model_factory def add_db_factory(self, db_name,", "main_config['verbose']) def _actor_factory(self, name, db, model, stream_mode, level_logger): raise NotImplementedError()", "stream_mode, level_logger): return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger) class", "val, args, acc): acc[arg] = val.split(',') return args, acc class", "CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "are met: # # * Redistributions of source code must", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "collection') subparser_mongo_output.add_argument('m', 'model', help='specify data type that will be storen", "disclaimer in the documentation # and/or other materials provided with", "BadContextException as exn: msg = 'CLI error : argument '", "to another model factory in the DBActorGenerator \"\"\" class ModelNameAlreadyUsed(PowerAPIException):", "self.model_factory: raise ModelNameAlreadyUsed() self.model_factory[db_name] = db_factory def _generate_db(self, db_name, db_config,", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "main_arg_name + ' ' + context_name print(msg, file=sys.stderr) sys.exit() class", "--' + main_arg_name + ' ' + context_name print(msg, file=sys.stderr)", "DBActorGenerator a database factory with a name already bound to", "csv files will be writen') subparser_csv_output.add_argument('m', 'model', help='specify data type", "default='puller_mongodb') subparser_mongo_input.add_argument('m', 'model', help='specify data type that will be storen", "ARG2 ... ') subparser_influx_output = ComponentSubParser('influxdb') subparser_influx_output.add_argument('u', 'uri', help='sepcify InfluxDB", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "+ ' : expect ' msg += exn.article + '", "'CLI error : unknow argument ' + exn.argument_name print(msg, file=sys.stderr)", "def extract_file_names(arg, val, args, acc): acc[arg] = val.split(',') return args,", "ARG1 ARG2 ... 
') subparser_influx_output = ComponentSubParser('influxdb') subparser_influx_output.add_argument('u', 'uri', help='sepcify", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "openTSDB connection port', type=int) subparser_opentsdb_output.add_argument('metric_name', help='specify metric name') subparser_opentsdb_output.add_argument('m', 'model',", ": ' + exn.msg print(msg, file=sys.stderr) except MissingValueException as exn:", "'db', help='specify InfluxDB database name') subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection", "subparser_mongo_output = ComponentSubParser('mongodb') subparser_mongo_output.add_argument('u', 'uri', help='sepcify MongoDB uri') subparser_mongo_output.add_argument('d', 'db',", "acc class CommonCLIParser(MainParser): def __init__(self): MainParser.__init__(self) self.add_argument('v', 'verbose', flag=True, action=enable_log,", "lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']), 'csv': lambda db_config: CsvDB(current_path=os.getcwd() if", "disclaimer. # # * Redistributions in binary form must reproduce", "db_config['files']), 'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']), 'opentsdb': lambda db_config:", "from powerapi.cli.parser import store_true from powerapi.cli.parser import BadValueException, MissingValueException from", "f: acc and os.access(f, os.R_OK), files.split(','), True) def extract_file_names(arg, val,", "stream mode') subparser_mongo_input = ComponentSubParser('mongodb') subparser_mongo_input.add_argument('u', 'uri', help='sepcify MongoDB uri')", "name') subparser_mongo_output.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_output.add_argument('m', 'model', help='specify", "help='enable stream mode') subparser_mongo_input = ComponentSubParser('mongodb') subparser_mongo_input.add_argument('u', 'uri', help='sepcify MongoDB", "input : --db_output database_name ARG1 ARG2 ... ') subparser_csv_input =", "will be writen') subparser_csv_output.add_argument('m', 'model', help='specify data type that will", "OF THE POSSIBILITY OF SUCH DAMAGE. import os import sys", "self.model_factory[db_name] = db_factory def _generate_db(self, db_name, db_config, main_config): return self.db_factory[db_name](db_config)", "powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel from powerapi.database import MongoDB,", "name of the copyright holder nor the names of its", "'collection', help='specify MongoDB database collection') subparser_mongo_input.add_argument('n', 'name', help='specify puller name',", "'files', help='specify input csv files with this format : file1,file2,file3',", "subparser_csv_output, help_str='specify a database input : --db_output database_name ARG1 ARG2", "except KeyError as exn: msg = 'CLI error : argument", "input : --db_output database_name ARG1 ARG2 ... 
') def parse_argv(self):", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "from functools import reduce from powerapi.exception import PowerAPIException from powerapi.cli.parser", "expect ' msg += exn.article + ' ' + exn.type_name", "+ ' ' + exn.type_name print(msg, file=sys.stderr) except UnknowArgException as", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "help='specify MongoDB database collection') subparser_mongo_output.add_argument('m', 'model', help='specify data type that", "action=enable_log, default=logging.NOTSET, help='enable verbose mode') self.add_argument('s', 'stream', flag=True, action=store_true, default=False,", "will be storen in the database', default='PowerReport') subparser_csv_output.add_argument('n', 'name', help='specify", "+= ' needed with --output ' + component_type print(msg, file=sys.stderr)", "provided with the distribution. # # * Neither the name", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "sys.exit() actors = {} for component_type, components_list in config[self.component_group_name].items(): for", "# Copyright (c) 2018, INRIA # Copyright (c) 2018, University", "' needed with --output ' + component_type print(msg, file=sys.stderr) sys.exit()", "\"\"\" class ModelNameAlreadyUsed(PowerAPIException): \"\"\" Exception raised when attempting to add", "THE POSSIBILITY OF SUCH DAMAGE. import os import sys import", "InfluxDB, OpenTSDB from powerapi.puller import PullerActor from powerapi.pusher import PusherActor", "import os import sys import logging from functools import reduce", "reduce from powerapi.exception import PowerAPIException from powerapi.cli.parser import MainParser, ComponentSubParser", "default='PowerReport') subparser_csv_output.add_argument('n', 'name', help='specify puller name', default='pusher_csv') self.add_component_subparser('output', subparser_csv_output, help_str='specify", "CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "Redistributions of source code must retain the above copyright notice,", "name', default='pusher_csv') self.add_component_subparser('output', subparser_csv_output, help_str='specify a database input : --db_output", "import PusherActor def enable_log(arg, val, args, acc): acc[arg] = logging.DEBUG", ": expect ' msg += exn.article + ' ' +", "component_group_name): self.component_group_name = component_group_name def generate(self, config): if self.component_group_name not", "val.split(',') return args, acc class CommonCLIParser(MainParser): def __init__(self): MainParser.__init__(self) self.add_argument('v',", "ComponentSubParser('opentsdb') subparser_opentsdb_output.add_argument('u', 'uri', help='sepcify openTSDB host') subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB", "for component_type, components_list in config[self.component_group_name].items(): for component_name, component_config in components_list.items():", "db_config else db_config['directory'], files=[] if 'files' not in db_config else", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT", "subparser_opentsdb_output = ComponentSubParser('opentsdb') subparser_opentsdb_output.add_argument('u', 'uri', help='sepcify openTSDB host') subparser_opentsdb_output.add_argument('p', 'port',", "with this format : file1,file2,file3', action=extract_file_names, default=[], check=check_csv_files, check_msg='one or", "materials provided with the distribution. 
# # * Neither the", "default='pusher_mongodb') self.add_component_subparser('output', subparser_mongo_output, help_str='specify a database output : --db_output database_name", "and/or other materials provided with the distribution. # # *", "documentation # and/or other materials provided with the distribution. #", "import PowerAPIException from powerapi.cli.parser import MainParser, ComponentSubParser from powerapi.cli.parser import", "def _gen_actor(self, db_name, db_config, main_config): db = self._generate_db(db_name, db_config, main_config)", "help='specify directory where where output csv files will be writen')", "that the following conditions are met: # # * Redistributions", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "or without # modification, are permitted provided that the following", "OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "that will be storen in the database', default='PowerReport') subparser_influx_output.add_argument('n', 'name',", "Redistribution and use in source and binary forms, with or", "code must retain the above copyright notice, this # list", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "in db_config else db_config['directory'], files=[] if 'files' not in db_config", "subparser_mongo_input.add_argument('m', 'model', help='specify data type that will be storen in", "+ ' : expect a value' print(msg, file=sys.stderr) except BadTypeException", "InfluxDB(db_config['uri'], db_config['port'], db_config['db']), 'opentsdb': lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']), }", "class Generator: def __init__(self, component_group_name): self.component_group_name = component_group_name def generate(self,", "HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT,", "model_name, model_factory): if model_name in self.model_factory: raise ModelNameAlreadyUsed() self.model_factory[model_name] =", "+ exn.type_name print(msg, file=sys.stderr) except UnknowArgException as exn: msg =", "MongoDB, CsvDB, InfluxDB, OpenTSDB from powerapi.puller import PullerActor from powerapi.pusher", "= report_filter def _actor_factory(self, name, db, model, stream_mode, level_logger): return", "return self.db_factory[db_name](db_config) def _gen_actor(self, db_name, db_config, main_config): db = self._generate_db(db_name,", "= 'CLI error : unknow argument ' + exn.argument_name print(msg,", "this # list of conditions and the following disclaimer. 
#", "' : ' + exn.msg print(msg, file=sys.stderr) except MissingValueException as", "main_config['stream'], main_config['verbose']) def _actor_factory(self, name, db, model, stream_mode, level_logger): raise", "_gen_actor(self, component_name, component_config, main_config): raise NotImplementedError() class ModelNameAlreadyUsed(PowerAPIException): \"\"\" Exception", "else db_config['files']), 'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']), 'opentsdb': lambda", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "'CLI error : argument ' + exn.argument_name msg += '", "__init__(self, component_group_name): self.component_group_name = component_group_name def generate(self, config): if self.component_group_name", "model_factory def add_db_factory(self, db_name, db_factory): if db_name in self.model_factory: raise", "read') subparser_csv_input.add_argument('m', 'model', help='specify data type that will be storen", "generate(self, config): if self.component_group_name not in config: print('CLI error :", "rights reserved. # # Redistribution and use in source and", "# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "uri') subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name') subparser_influx_output.add_argument('p', 'port', help='specify", "def __init__(self): DBActorGenerator.__init__(self, 'output') def _actor_factory(self, name, db, model, stream_mode,", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING,", "argument ' + exn.argument_name + ' : expect a value'", "FormulaModel(), 'ControlReport': ControlModel(), } self.db_factory = { 'mongodb': lambda db_config:", "directory where where output csv files will be writen') subparser_csv_output.add_argument('m',", "= model_factory def add_db_factory(self, db_name, db_factory): if db_name in self.model_factory:", "' ' + exn.type_name print(msg, file=sys.stderr) except UnknowArgException as exn:", ": argument ' + exn.argument_name + ' : expect a", "nor the names of its # contributors may be used", "subparser_csv_input, help_str='specify a database input : --db_output database_name ARG1 ARG2", "file=sys.stderr) except UnknowArgException as exn: msg = 'CLI error :", "unknow argument ' + exn.argument_name print(msg, file=sys.stderr) except BadContextException as", "binary form must reproduce the above copyright notice, # this", "subparser_influx_output.add_argument('n', 'name', help='specify puller name', default='pusher_influxdb') self.add_component_subparser('output', subparser_influx_output, help_str='specify a", "factory with a name already bound to another database factory", "that will be storen in the database', default='PowerReport') subparser_opentsdb_output.add_argument('n', 'name',", "raised when attempting to add to a DBActorGenerator a database", "help='specify puller name', default='pusher_influxdb') self.add_component_subparser('output', subparser_influx_output, help_str='specify a database input", "help='specify metric name') subparser_opentsdb_output.add_argument('m', 'model', help='specify data type that will", "db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']), } def add_model_factory(self, model_name, model_factory): if", ": file1,file2,file3', action=extract_file_names, default=[], check=check_csv_files, check_msg='one or more csv files", "written permission. 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "storen in the database', default='PowerReport') subparser_opentsdb_output.add_argument('n', 'name', help='specify puller name',", "print(msg, file=sys.stderr) except MissingValueException as exn: msg = 'CLI error", "config) except KeyError as exn: msg = 'CLI error :", ": --db_output database_name ARG1 ARG2 ...') subparser_csv_output = ComponentSubParser('csv') subparser_csv_output.add_argument('d',", "enable_log(arg, val, args, acc): acc[arg] = logging.DEBUG return args, acc", "True) def extract_file_names(arg, val, args, acc): acc[arg] = val.split(',') return", "ARG1 ARG2 ... ') def parse_argv(self): try: return self.parse(sys.argv[1:]) except", "'CLI error : argument ' + exn.argument_name + ' :", ":' for main_arg_name, context_name in exn.context_list: msg += '\\n --'", ") subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection') subparser_mongo_input.add_argument('n', 'name', help='specify", "database_name ARG1 ARG2 ... ') subparser_csv_input = ComponentSubParser('csv') subparser_csv_input.add_argument('f', 'files',", "in components_list.items(): try: actors[component_name] = self._gen_actor(component_type, component_config, config) except KeyError", "self.report_filter, model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self, 'output')", "db_config['port'], db_config['metric_name']), } def add_model_factory(self, model_name, model_factory): if model_name in", "ModelNameAlreadyUsed() self.model_factory[model_name] = model_factory def add_db_factory(self, db_name, db_factory): if db_name", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #", "database', default='PowerReport') subparser_influx_output.add_argument('n', 'name', help='specify puller name', default='pusher_influxdb') self.add_component_subparser('output', subparser_influx_output,", "BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF", "'uri', help='sepcify MongoDB uri') subparser_mongo_input.add_argument('d', 'db', help='specify MongoDB database name',", "raise NotImplementedError() class ModelNameAlreadyUsed(PowerAPIException): \"\"\" Exception raised when attempting to", "Exception raised when attempting to add to a DBActorGenerator a", "MainParser, ComponentSubParser from powerapi.cli.parser import store_true from powerapi.cli.parser import BadValueException,", "... ') subparser_influx_output = ComponentSubParser('influxdb') subparser_influx_output.add_argument('u', 'uri', help='sepcify InfluxDB uri')", "in the correct context\\nUse it with the following arguments :'", "db_name, db_config, main_config): db = self._generate_db(db_name, db_config, main_config) model =", "ARG2 ... ') subparser_csv_input = ComponentSubParser('csv') subparser_csv_input.add_argument('f', 'files', help='specify input", "UnknowArgException from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel from powerapi.database", "in source and binary forms, with or without # modification,", "'uri', help='sepcify InfluxDB uri') subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name')", "be read') subparser_csv_input.add_argument('m', 'model', help='specify data type that will be", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "distribution. 
# Copyright (c) 2018, INRIA
# Copyright (c) 2018, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice, this
#       list of conditions and the following disclaimer.
#
#     * Redistributions in binary form must reproduce the above copyright notice,
#       this list of conditions and the following disclaimer in the documentation
#       and/or other materials provided with the distribution.
#
#     * Neither the name of the copyright holder nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import logging
from functools import reduce

from powerapi.exception import PowerAPIException
from powerapi.cli.parser import MainParser, ComponentSubParser
from powerapi.cli.parser import store_true
from powerapi.cli.parser import BadValueException, MissingValueException
from powerapi.cli.parser import BadTypeException, BadContextException
from powerapi.cli.parser import UnknowArgException
from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel
from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB
from powerapi.puller import PullerActor
from powerapi.pusher import PusherActor


def enable_log(arg, val, args, acc):
    acc[arg] = logging.DEBUG
    return args, acc


def check_csv_files(files):
    return reduce(lambda acc, f: acc and os.access(f, os.R_OK), files.split(','), True)


def extract_file_names(arg, val, args, acc):
    acc[arg] = val.split(',')
    return args, acc


class CommonCLIParser(MainParser):
    """Command line parser with the input/output database sub-parsers shared by PowerAPI tools."""

    def __init__(self):
        MainParser.__init__(self)

        self.add_argument('v', 'verbose', flag=True, action=enable_log, default=logging.NOTSET,
                          help='enable verbose mode')
        self.add_argument('s', 'stream', flag=True, action=store_true, default=False,
                          help='enable stream mode')

        # Input databases
        subparser_mongo_input = ComponentSubParser('mongodb')
        subparser_mongo_input.add_argument('u', 'uri', help='specify MongoDB uri')
        subparser_mongo_input.add_argument('d', 'db', help='specify MongoDB database name')
        subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection')
        subparser_mongo_input.add_argument('n', 'name', help='specify puller name', default='puller_mongodb')
        subparser_mongo_input.add_argument('m', 'model',
                                           help='specify data type that will be stored in the database',
                                           default='HWPCReport')
        self.add_component_subparser('input', subparser_mongo_input,
                                     help_str='specify a database input : --db_output database_name ARG1 ARG2 ...')

        subparser_csv_input = ComponentSubParser('csv')
        subparser_csv_input.add_argument('f', 'files',
                                         help='specify input csv files with this format : file1,file2,file3',
                                         action=extract_file_names, default=[],
                                         check=check_csv_files,
                                         check_msg='one or more csv files couldn\'t be read')
        subparser_csv_input.add_argument('m', 'model',
                                         help='specify data type that will be stored in the database',
                                         default='HWPCReport')
        subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv')
        self.add_component_subparser('input', subparser_csv_input,
                                     help_str='specify a database input : --db_output database_name ARG1 ARG2 ...')

        # Output databases
        subparser_mongo_output = ComponentSubParser('mongodb')
        subparser_mongo_output.add_argument('u', 'uri', help='specify MongoDB uri')
        subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database name')
        subparser_mongo_output.add_argument('c', 'collection', help='specify MongoDB database collection')
        subparser_mongo_output.add_argument('m', 'model',
                                            help='specify data type that will be stored in the database',
                                            default='PowerReport')
        subparser_mongo_output.add_argument('n', 'name', help='specify pusher name', default='pusher_mongodb')
        self.add_component_subparser('output', subparser_mongo_output,
                                     help_str='specify a database output : --db_output database_name ARG1 ARG2 ...')

        subparser_csv_output = ComponentSubParser('csv')
        subparser_csv_output.add_argument('d', 'directory',
                                          help='specify directory where output csv files will be written')
        subparser_csv_output.add_argument('m', 'model',
                                          help='specify data type that will be stored in the database',
                                          default='PowerReport')
        subparser_csv_output.add_argument('n', 'name', help='specify pusher name', default='pusher_csv')
        self.add_component_subparser('output', subparser_csv_output,
                                     help_str='specify a database output : --db_output database_name ARG1 ARG2 ...')

        subparser_influx_output = ComponentSubParser('influxdb')
        subparser_influx_output.add_argument('u', 'uri', help='specify InfluxDB uri')
        subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name')
        subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int)
        subparser_influx_output.add_argument('m', 'model',
                                             help='specify data type that will be stored in the database',
                                             default='PowerReport')
        subparser_influx_output.add_argument('n', 'name', help='specify pusher name', default='pusher_influxdb')
        self.add_component_subparser('output', subparser_influx_output,
                                     help_str='specify a database output : --db_output database_name ARG1 ARG2 ...')

        subparser_opentsdb_output = ComponentSubParser('opentsdb')
        subparser_opentsdb_output.add_argument('u', 'uri', help='specify openTSDB host')
        subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int)
        subparser_opentsdb_output.add_argument('metric_name', help='specify metric name')
        subparser_opentsdb_output.add_argument('m', 'model',
                                               help='specify data type that will be stored in the database',
                                               default='PowerReport')
        subparser_opentsdb_output.add_argument('n', 'name', help='specify pusher name', default='pusher_opentsdb')
        self.add_component_subparser('output', subparser_opentsdb_output,
                                     help_str='specify a database output : --db_output database_name ARG1 ARG2 ...')

    def parse_argv(self):
        """Parse sys.argv; return the configuration on success, otherwise print an error and exit."""
        try:
            return self.parse(sys.argv[1:])
        except BadValueException as exn:
            msg = 'CLI error : argument ' + exn.argument_name + ' : ' + exn.msg
            print(msg, file=sys.stderr)
        except MissingValueException as exn:
            msg = 'CLI error : argument ' + exn.argument_name + ' : expect a value'
            print(msg, file=sys.stderr)
        except BadTypeException as exn:
            msg = 'CLI error : argument ' + exn.argument_name + ' : expect '
            msg += exn.article + ' ' + exn.type_name
            print(msg, file=sys.stderr)
        except UnknowArgException as exn:
            msg = 'CLI error : unknown argument ' + exn.argument_name
            print(msg, file=sys.stderr)
        except BadContextException as exn:
            msg = 'CLI error : argument ' + exn.argument_name
            msg += ' not used in the correct context\nUse it with the following arguments :'
            for main_arg_name, context_name in exn.context_list:
                msg += '\n --' + main_arg_name + ' ' + context_name
            print(msg, file=sys.stderr)
        sys.exit()


class Generator:
    """Base class that builds a group of actors from a parsed CLI configuration."""

    def __init__(self, component_group_name):
        self.component_group_name = component_group_name

    def generate(self, config):
        if self.component_group_name not in config:
            print('CLI error : no ' + self.component_group_name + ' specified', file=sys.stderr)
            sys.exit()

        actors = {}
        for component_type, components_list in config[self.component_group_name].items():
            for component_name, component_config in components_list.items():
                try:
                    actors[component_name] = self._gen_actor(component_type, component_config, config)
                except KeyError as exn:
                    msg = 'CLI error : argument ' + exn.args[0]
                    msg += ' needed with --output ' + component_type
                    print(msg, file=sys.stderr)
                    sys.exit()
        return actors

    def _gen_actor(self, component_name, component_config, main_config):
        raise NotImplementedError()


class ModelNameAlreadyUsed(PowerAPIException):
    """
    Exception raised when attempting to add to a DBActorGenerator a model factory with a name
    already bound to another model factory in the DBActorGenerator
    """


class DatabaseNameAlreadyUsed(PowerAPIException):
    """
    Exception raised when attempting to add to a DBActorGenerator a database factory with a name
    already bound to another database factory in the DBActorGenerator
    """


class DBActorGenerator(Generator):
    """Generator for database-backed actors, built from report-model and database factories."""

    def __init__(self, component_group_name):
        Generator.__init__(self, component_group_name)
        self.model_factory = {
            'HWPCReport': HWPCModel(),
            'PowerReport': PowerModel(),
            'FormulaReport': FormulaModel(),
            'ControlReport': ControlModel(),
        }

        self.db_factory = {
            'mongodb': lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']),
            'csv': lambda db_config: CsvDB(
                current_path=os.getcwd() if 'directory' not in db_config else db_config['directory'],
                files=[] if 'files' not in db_config else db_config['files']),
            'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']),
            'opentsdb': lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']),
        }

    def add_model_factory(self, model_name, model_factory):
        if model_name in self.model_factory:
            raise ModelNameAlreadyUsed()
        self.model_factory[model_name] = model_factory

    def add_db_factory(self, db_name, db_factory):
        if db_name in self.db_factory:
            raise DatabaseNameAlreadyUsed()
        self.db_factory[db_name] = db_factory

    def _generate_db(self, db_name, db_config, main_config):
        return self.db_factory[db_name](db_config)

    def _gen_actor(self, db_name, db_config, main_config):
        db = self._generate_db(db_name, db_config, main_config)
        model = self.model_factory[db_config['model']]
        name = db_config['name']
        return self._actor_factory(name, db, model, main_config['stream'], main_config['verbose'])

    def _actor_factory(self, name, db, model, stream_mode, level_logger):
        raise NotImplementedError()


class PullerGenerator(DBActorGenerator):
    """Generate PullerActor instances from the 'input' section of the configuration."""

    def __init__(self, report_filter):
        DBActorGenerator.__init__(self, 'input')
        self.report_filter = report_filter

    def _actor_factory(self, name, db, model, stream_mode, level_logger):
        return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger)


class PusherGenerator(DBActorGenerator):
    """Generate PusherActor instances from the 'output' section of the configuration."""

    def __init__(self):
        DBActorGenerator.__init__(self, 'output')

    def _actor_factory(self, name, db, model, stream_mode, level_logger):
        return PusherActor(name, model, db, level_logger)
[ "GROUP IMPLEMENTATION: @property def refine_title(self): return \"In-situ behaviour\" @property def", "prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs = my_kwargs", "# Complete license can be found in the LICENSE file.", "------------------------------------------------------------ # Initialization and other internals # ------------------------------------------------------------ def __init__(self,", "constructor # ------------------------------------------------------------ # Methods & Functions # ------------------------------------------------------------ def", "None\" assert self.is_compatible_with(phase), \"`%r` is not compatible with phase `%r`\"", "# Copyright (c) 2013, <NAME> # All rights reserved. #", "tabular=True ) # ------------------------------------------------------------ # Initialization and other internals #", "from pyxrd.generic.io.custom_io import storables, Storable from pyxrd.generic.models.base import DataModel from", "coding=UTF-8 # ex:ts=4:sw=4:et=on # # Copyright (c) 2013, <NAME> #", "return \"In-situ behaviour\" @property def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name=\"*\"", "INTEL: class Meta(DataModel.Meta): store_id = \"InSituBehaviour\" # Override this so", "string concrete = False # Indicates this cannot be instantiated", "with phase `%r`\" % (self, phase) def is_compatible_with(self, phase): return", "RefinementGroup, Storable): \"\"\" Interface class for coding in-situ behaviour scripts.", "sub classes need to override this pass #end of class", "concrete = False # Indicates this cannot be instantiated and", ") #: The name of this Behaviour name = StringProperty(", "property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION: @property def refine_title(self): return", "Behaviour name = StringProperty( default=\"New Behaviour\", text=\"Name\", visible=True, persistent=True, tabular=True", "def refine_title(self): return \"In-situ behaviour\" @property def refine_descriptor_data(self): return dict(", "apply(self, phase): assert phase is not None, \"Cannot apply on", "# Initialization and other internals # ------------------------------------------------------------ def __init__(self, *args,", "\"Cannot apply on None\" assert self.is_compatible_with(phase), \"`%r` is not compatible", "of constructor # ------------------------------------------------------------ # Methods & Functions # ------------------------------------------------------------", "class Meta(DataModel.Meta): store_id = \"InSituBehaviour\" # Override this so it", "All rights reserved. # Complete license can be found in", "the LICENSE file. from mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io import", "and added in the UI mixture = property(DataModel.parent.fget, DataModel.parent.fset) #", "override or implement the methods below. \"\"\" # MODEL INTEL:", "= \"InSituBehaviour\" # Override this so it is a unique", "return dict( phase_name=self.phase.refine_title, component_name=\"*\" ) #: The name of this", "# All rights reserved. 
# Complete license can be found", "mixture = property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION: @property def", "# sub classes need to override this pass #end of", "apply on None\" assert self.is_compatible_with(phase), \"`%r` is not compatible with", "= StringProperty( default=\"New Behaviour\", text=\"Name\", visible=True, persistent=True, tabular=True ) #", "return False # sub classes need to override this pass", "self.get_kwarg(kwargs, self.name, \"name\") pass #end of constructor # ------------------------------------------------------------ #", "a unique string concrete = False # Indicates this cannot", "license can be found in the LICENSE file. from mvc.models.properties", "UI mixture = property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION: @property", "below. \"\"\" # MODEL INTEL: class Meta(DataModel.Meta): store_id = \"InSituBehaviour\"", "is_compatible_with(self, phase): return False # sub classes need to override", "Storable from pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register()", "kwargs = my_kwargs with self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name, \"name\")", "refine_title(self): return \"In-situ behaviour\" @property def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title,", "InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs = my_kwargs with self.data_changed.hold():", "is a unique string concrete = False # Indicates this", "#end of constructor # ------------------------------------------------------------ # Methods & Functions #", "assert self.is_compatible_with(phase), \"`%r` is not compatible with phase `%r`\" %", "self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name, \"name\") pass #end of constructor", "# Methods & Functions # ------------------------------------------------------------ def apply(self, phase): assert", "Meta(DataModel.Meta): store_id = \"InSituBehaviour\" # Override this so it is", "# coding=UTF-8 # ex:ts=4:sw=4:et=on # # Copyright (c) 2013, <NAME>", "Complete license can be found in the LICENSE file. from", "implement the methods below. \"\"\" # MODEL INTEL: class Meta(DataModel.Meta):", "Methods & Functions # ------------------------------------------------------------ def apply(self, phase): assert phase", "Sub-classes should override or implement the methods below. \"\"\" #", "should override or implement the methods below. 
\"\"\" # MODEL", "mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io import storables, Storable from pyxrd.generic.models.base", "*args, **kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()]", "DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable):", "internals # ------------------------------------------------------------ def __init__(self, *args, **kwargs): my_kwargs = self.pop_kwargs(kwargs,", "\"name\") pass #end of constructor # ------------------------------------------------------------ # Methods &", "self.is_compatible_with(phase), \"`%r` is not compatible with phase `%r`\" % (self,", "storables, Storable from pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup", "and other internals # ------------------------------------------------------------ def __init__(self, *args, **kwargs): my_kwargs", "StringProperty from pyxrd.generic.io.custom_io import storables, Storable from pyxrd.generic.models.base import DataModel", "REFINEMENT GROUP IMPLEMENTATION: @property def refine_title(self): return \"In-situ behaviour\" @property", "unique string concrete = False # Indicates this cannot be", "my_kwargs with self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name, \"name\") pass #end", "# ------------------------------------------------------------ def __init__(self, *args, **kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label", "# Override this so it is a unique string concrete", "pass #end of constructor # ------------------------------------------------------------ # Methods & Functions", "in the LICENSE file. from mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io", "store_id = \"InSituBehaviour\" # Override this so it is a", "reserved. # Complete license can be found in the LICENSE", "persistent=True, tabular=True ) # ------------------------------------------------------------ # Initialization and other internals", "Indicates this cannot be instantiated and added in the UI", "super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs = my_kwargs with self.data_changed.hold(): self.name =", "\"\"\" # MODEL INTEL: class Meta(DataModel.Meta): store_id = \"InSituBehaviour\" #", "# ------------------------------------------------------------ # Initialization and other internals # ------------------------------------------------------------ def", "# ------------------------------------------------------------ def apply(self, phase): assert phase is not None,", "be instantiated and added in the UI mixture = property(DataModel.parent.fget,", "can be found in the LICENSE file. from mvc.models.properties import", "Interface class for coding in-situ behaviour scripts. Sub-classes should override", "Copyright (c) 2013, <NAME> # All rights reserved. # Complete", "(c) 2013, <NAME> # All rights reserved. 
# Complete license", "so it is a unique string concrete = False #", "refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name=\"*\" ) #: The name of", "*[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs", "compatible with phase `%r`\" % (self, phase) def is_compatible_with(self, phase):", "% (self, phase) def is_compatible_with(self, phase): return False # sub", "this so it is a unique string concrete = False", "ex:ts=4:sw=4:et=on # # Copyright (c) 2013, <NAME> # All rights", "The name of this Behaviour name = StringProperty( default=\"New Behaviour\",", "this Behaviour name = StringProperty( default=\"New Behaviour\", text=\"Name\", visible=True, persistent=True,", "added in the UI mixture = property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT", "<NAME> # All rights reserved. # Complete license can be", "= False # Indicates this cannot be instantiated and added", ") super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs = my_kwargs with self.data_changed.hold(): self.name", "rights reserved. # Complete license can be found in the", "pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\" Interface", "= self.pop_kwargs(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args,", "default=\"New Behaviour\", text=\"Name\", visible=True, persistent=True, tabular=True ) # ------------------------------------------------------------ #", "phase is not None, \"Cannot apply on None\" assert self.is_compatible_with(phase),", "self.name, \"name\") pass #end of constructor # ------------------------------------------------------------ # Methods", "@property def refine_title(self): return \"In-situ behaviour\" @property def refine_descriptor_data(self): return", "it is a unique string concrete = False # Indicates", "# MODEL INTEL: class Meta(DataModel.Meta): store_id = \"InSituBehaviour\" # Override", "#: The name of this Behaviour name = StringProperty( default=\"New", "DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION: @property def refine_title(self): return \"In-situ", "Override this so it is a unique string concrete =", "MODEL INTEL: class Meta(DataModel.Meta): store_id = \"InSituBehaviour\" # Override this", ") # ------------------------------------------------------------ # Initialization and other internals # ------------------------------------------------------------", "for prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs =", "this cannot be instantiated and added in the UI mixture", "from mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io import storables, Storable from", "import storables, Storable from pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins import", "my_kwargs = self.pop_kwargs(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour,", "def __init__(self, *args, **kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label for prop", "@storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\" Interface class for coding", "methods below. 
\"\"\" # MODEL INTEL: class Meta(DataModel.Meta): store_id =", "`%r`\" % (self, phase) def is_compatible_with(self, phase): return False #", "cannot be instantiated and added in the UI mixture =", "be found in the LICENSE file. from mvc.models.properties import StringProperty", "------------------------------------------------------------ def apply(self, phase): assert phase is not None, \"Cannot", "(self, phase) def is_compatible_with(self, phase): return False # sub classes", "# REFINEMENT GROUP IMPLEMENTATION: @property def refine_title(self): return \"In-situ behaviour\"", "Behaviour\", text=\"Name\", visible=True, persistent=True, tabular=True ) # ------------------------------------------------------------ # Initialization", "in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs = my_kwargs with", "is not None, \"Cannot apply on None\" assert self.is_compatible_with(phase), \"`%r`", "\"InSituBehaviour\" # Override this so it is a unique string", "__init__(self, *args, **kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label for prop in", "phase): return False # sub classes need to override this", "StringProperty( default=\"New Behaviour\", text=\"Name\", visible=True, persistent=True, tabular=True ) # ------------------------------------------------------------", "# ------------------------------------------------------------ # Methods & Functions # ------------------------------------------------------------ def apply(self,", "behaviour scripts. Sub-classes should override or implement the methods below.", "self.name = self.get_kwarg(kwargs, self.name, \"name\") pass #end of constructor #", "not compatible with phase `%r`\" % (self, phase) def is_compatible_with(self,", "False # sub classes need to override this pass #end", "**kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()] )", "# # Copyright (c) 2013, <NAME> # All rights reserved.", "is not compatible with phase `%r`\" % (self, phase) def", "Storable): \"\"\" Interface class for coding in-situ behaviour scripts. Sub-classes", "pyxrd.generic.io.custom_io import storables, Storable from pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins", "\"`%r` is not compatible with phase `%r`\" % (self, phase)", "class for coding in-situ behaviour scripts. Sub-classes should override or", "self).__init__(*args, **kwargs) kwargs = my_kwargs with self.data_changed.hold(): self.name = self.get_kwarg(kwargs,", "Functions # ------------------------------------------------------------ def apply(self, phase): assert phase is not", "from pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class", "or implement the methods below. \"\"\" # MODEL INTEL: class", "from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\"", "RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\" Interface class for", "= self.get_kwarg(kwargs, self.name, \"name\") pass #end of constructor # ------------------------------------------------------------", "@property def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name=\"*\" ) #: The", "LICENSE file. 
from mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io import storables,", "def is_compatible_with(self, phase): return False # sub classes need to", "name = StringProperty( default=\"New Behaviour\", text=\"Name\", visible=True, persistent=True, tabular=True )", "def apply(self, phase): assert phase is not None, \"Cannot apply", "instantiated and added in the UI mixture = property(DataModel.parent.fget, DataModel.parent.fset)", "in the UI mixture = property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP", "the UI mixture = property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION:", "dict( phase_name=self.phase.refine_title, component_name=\"*\" ) #: The name of this Behaviour", "phase_name=self.phase.refine_title, component_name=\"*\" ) #: The name of this Behaviour name", "behaviour\" @property def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name=\"*\" ) #:", "in-situ behaviour scripts. Sub-classes should override or implement the methods", "assert phase is not None, \"Cannot apply on None\" assert", "found in the LICENSE file. from mvc.models.properties import StringProperty from", "False # Indicates this cannot be instantiated and added in", "pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class InSituBehaviour(DataModel,", "\"In-situ behaviour\" @property def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name=\"*\" )", "phase `%r`\" % (self, phase) def is_compatible_with(self, phase): return False", "# ex:ts=4:sw=4:et=on # # Copyright (c) 2013, <NAME> # All", "2013, <NAME> # All rights reserved. # Complete license can", "import StringProperty from pyxrd.generic.io.custom_io import storables, Storable from pyxrd.generic.models.base import", "text=\"Name\", visible=True, persistent=True, tabular=True ) # ------------------------------------------------------------ # Initialization and", "class InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\" Interface class for coding in-situ", "def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name=\"*\" ) #: The name", "visible=True, persistent=True, tabular=True ) # ------------------------------------------------------------ # Initialization and other", "Initialization and other internals # ------------------------------------------------------------ def __init__(self, *args, **kwargs):", "phase): assert phase is not None, \"Cannot apply on None\"", "# Indicates this cannot be instantiated and added in the", "other internals # ------------------------------------------------------------ def __init__(self, *args, **kwargs): my_kwargs =", "import RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\" Interface class", "component_name=\"*\" ) #: The name of this Behaviour name =", "the methods below. \"\"\" # MODEL INTEL: class Meta(DataModel.Meta): store_id", "of this Behaviour name = StringProperty( default=\"New Behaviour\", text=\"Name\", visible=True,", "scripts. Sub-classes should override or implement the methods below. 
\"\"\"", "self.pop_kwargs(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs)", "------------------------------------------------------------ # Methods & Functions # ------------------------------------------------------------ def apply(self, phase):", "\"\"\" Interface class for coding in-situ behaviour scripts. Sub-classes should", "= my_kwargs with self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name, \"name\") pass", "None, \"Cannot apply on None\" assert self.is_compatible_with(phase), \"`%r` is not", "with self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name, \"name\") pass #end of", "InSituBehaviour(DataModel, RefinementGroup, Storable): \"\"\" Interface class for coding in-situ behaviour", "& Functions # ------------------------------------------------------------ def apply(self, phase): assert phase is", "phase) def is_compatible_with(self, phase): return False # sub classes need", "on None\" assert self.is_compatible_with(phase), \"`%r` is not compatible with phase", "------------------------------------------------------------ def __init__(self, *args, **kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label for", "file. from mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io import storables, Storable", "= property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION: @property def refine_title(self):", "import DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup,", "IMPLEMENTATION: @property def refine_title(self): return \"In-situ behaviour\" @property def refine_descriptor_data(self):", "for coding in-situ behaviour scripts. Sub-classes should override or implement", "**kwargs) kwargs = my_kwargs with self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name,", "not None, \"Cannot apply on None\" assert self.is_compatible_with(phase), \"`%r` is", "name of this Behaviour name = StringProperty( default=\"New Behaviour\", text=\"Name\",", "coding in-situ behaviour scripts. Sub-classes should override or implement the" ]
[ "= 80 # the bug xPos = 40 yPos =", "(xPos + worldXSize) % worldXSize yPos = (yPos + worldYSize)", "1 plainProgrammingBug.py # start 1 plainProgrammingBug.py import random def SimpleBug():", "randomMove() yPos += randomMove() with xPos += random.randint(-1, 1) yPos", "with equal probability def randomMove(): return random.randint(-1, 1) SimpleBug() \"\"\"", "1 plainProgrammingBug.py import random def SimpleBug(): # the environment worldXSize", "= (yPos + worldYSize) % worldYSize print (\"I moved to", "+= random.randint(-1, 1) yPos += random.randint(-1, 1) but the use", "(yPos + worldYSize) % worldYSize print (\"I moved to X", "= (xPos + worldXSize) % worldXSize yPos = (yPos +", "worldYSize) % worldYSize print (\"I moved to X = \",", "-1, 0, 1 with equal probability def randomMove(): return random.randint(-1,", "+= random.randint(-1, 1) but the use of the function allows", "moved to X = \", xPos, \" Y = \",", "1) SimpleBug() \"\"\" you can eliminate the randomMove() function substituting", "randomMove() xPos = (xPos + worldXSize) % worldXSize yPos =", "worldXSize = 80 worldYSize = 80 # the bug xPos", "% worldYSize print (\"I moved to X = \", xPos,", "Y = \", yPos) # returns -1, 0, 1 with", "function substituting xPos += randomMove() yPos += randomMove() with xPos", "the randomMove() function substituting xPos += randomMove() yPos += randomMove()", "= \", xPos, \" Y = \", yPos) # returns", "range(100): xPos += randomMove() yPos += randomMove() xPos = (xPos", "xPos, \" Y = \", yPos) # returns -1, 0,", "+ worldYSize) % worldYSize print (\"I moved to X =", "+ worldXSize) % worldXSize yPos = (yPos + worldYSize) %", "+= randomMove() yPos += randomMove() xPos = (xPos + worldXSize)", "yPos = (yPos + worldYSize) % worldYSize print (\"I moved", "randomMove() function substituting xPos += randomMove() yPos += randomMove() with", "yPos += randomMove() xPos = (xPos + worldXSize) % worldXSize", "function allows us to use here a self-explanatory name \"\"\"", "return random.randint(-1, 1) SimpleBug() \"\"\" you can eliminate the randomMove()", "X = \", xPos, \" Y = \", yPos) #", "you can eliminate the randomMove() function substituting xPos += randomMove()", "# returns -1, 0, 1 with equal probability def randomMove():", "print (\"I moved to X = \", xPos, \" Y", "the action for i in range(100): xPos += randomMove() yPos", "80 worldYSize = 80 # the bug xPos = 40", "40 # the action for i in range(100): xPos +=", "with xPos += random.randint(-1, 1) yPos += random.randint(-1, 1) but", "random.randint(-1, 1) SimpleBug() \"\"\" you can eliminate the randomMove() function", "# the bug xPos = 40 yPos = 40 #", "randomMove(): return random.randint(-1, 1) SimpleBug() \"\"\" you can eliminate the", "random.randint(-1, 1) yPos += random.randint(-1, 1) but the use of", "but the use of the function allows us to use", "+= randomMove() with xPos += random.randint(-1, 1) yPos += random.randint(-1,", "80 # the bug xPos = 40 yPos = 40", "use of the function allows us to use here a", "def randomMove(): return random.randint(-1, 1) SimpleBug() \"\"\" you can eliminate", "plainProgrammingBug/start 1 plainProgrammingBug.py # start 1 plainProgrammingBug.py import random def", "worldYSize = 80 # the bug xPos = 40 yPos", "xPos = (xPos + worldXSize) % worldXSize yPos = (yPos", "1) but the use of the function allows us to", "= 80 worldYSize = 80 # the bug xPos =", "plainProgrammingBug.py # start 1 plainProgrammingBug.py import random def SimpleBug(): #", "xPos = 40 yPos = 40 # the action for", "random.randint(-1, 1) but the use of 
the function allows us", "SimpleBug() \"\"\" you can eliminate the randomMove() function substituting xPos", "import random def SimpleBug(): # the environment worldXSize = 80", "# start 1 plainProgrammingBug.py import random def SimpleBug(): # the", "worldXSize yPos = (yPos + worldYSize) % worldYSize print (\"I", "plainProgrammingBug.py import random def SimpleBug(): # the environment worldXSize =", "SimpleBug(): # the environment worldXSize = 80 worldYSize = 80", "= \", yPos) # returns -1, 0, 1 with equal", "random def SimpleBug(): # the environment worldXSize = 80 worldYSize", "\" Y = \", yPos) # returns -1, 0, 1", "the use of the function allows us to use here", "yPos = 40 # the action for i in range(100):", "in range(100): xPos += randomMove() yPos += randomMove() xPos =", "<reponame>vishalbelsare/SLAPP3<filename>1 plainProgrammingBug/start 1 plainProgrammingBug.py # start 1 plainProgrammingBug.py import random", "the environment worldXSize = 80 worldYSize = 80 # the", "of the function allows us to use here a self-explanatory", "\", xPos, \" Y = \", yPos) # returns -1,", "# the action for i in range(100): xPos += randomMove()", "1) yPos += random.randint(-1, 1) but the use of the", "worldXSize) % worldXSize yPos = (yPos + worldYSize) % worldYSize", "0, 1 with equal probability def randomMove(): return random.randint(-1, 1)", "the function allows us to use here a self-explanatory name", "yPos += random.randint(-1, 1) but the use of the function", "= 40 yPos = 40 # the action for i", "substituting xPos += randomMove() yPos += randomMove() with xPos +=", "to X = \", xPos, \" Y = \", yPos)", "environment worldXSize = 80 worldYSize = 80 # the bug", "# the environment worldXSize = 80 worldYSize = 80 #", "the bug xPos = 40 yPos = 40 # the", "xPos += randomMove() yPos += randomMove() with xPos += random.randint(-1,", "yPos) # returns -1, 0, 1 with equal probability def", "equal probability def randomMove(): return random.randint(-1, 1) SimpleBug() \"\"\" you", "(\"I moved to X = \", xPos, \" Y =", "can eliminate the randomMove() function substituting xPos += randomMove() yPos", "xPos += randomMove() yPos += randomMove() xPos = (xPos +", "def SimpleBug(): # the environment worldXSize = 80 worldYSize =", "i in range(100): xPos += randomMove() yPos += randomMove() xPos", "bug xPos = 40 yPos = 40 # the action", "eliminate the randomMove() function substituting xPos += randomMove() yPos +=", "randomMove() with xPos += random.randint(-1, 1) yPos += random.randint(-1, 1)", "40 yPos = 40 # the action for i in", "probability def randomMove(): return random.randint(-1, 1) SimpleBug() \"\"\" you can", "1 with equal probability def randomMove(): return random.randint(-1, 1) SimpleBug()", "randomMove() yPos += randomMove() xPos = (xPos + worldXSize) %", "+= randomMove() yPos += randomMove() with xPos += random.randint(-1, 1)", "yPos += randomMove() with xPos += random.randint(-1, 1) yPos +=", "for i in range(100): xPos += randomMove() yPos += randomMove()", "= 40 # the action for i in range(100): xPos", "action for i in range(100): xPos += randomMove() yPos +=", "start 1 plainProgrammingBug.py import random def SimpleBug(): # the environment", "worldYSize print (\"I moved to X = \", xPos, \"", "\", yPos) # returns -1, 0, 1 with equal probability", "returns -1, 0, 1 with equal probability def randomMove(): return", "+= randomMove() xPos = (xPos + worldXSize) % worldXSize yPos", "% worldXSize yPos = (yPos + worldYSize) % worldYSize print", "\"\"\" you can eliminate the randomMove() function substituting xPos 
+=", "xPos += random.randint(-1, 1) yPos += random.randint(-1, 1) but the" ]
[ "if d[m-coin]+1 < min_coin: min_coin = d[m-coin]+1 d[m] = min_coin", "= 18705 #coin_list = [24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1] #coin_list =", "[24,13,12,7,5,3,1] #coin_list = map(int, open('dataset_71_8.txt').read().split(',')) d = {0:0} for m", "d = {0:0} for m in range(1,money+1): min_coin = 1000000", "min_coin = 1000000 for coin in coin_list: if m >=", "= 8074 #money = 18705 #coin_list = [24,23,21,5,3,1] coin_list =", "#coin_list = [24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1] #coin_list = map(int, open('dataset_71_8.txt').read().split(','))", "= [24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1] #coin_list = map(int, open('dataset_71_8.txt').read().split(',')) d", "= 1000000 for coin in coin_list: if m >= coin:", "coin in coin_list: if m >= coin: if d[m-coin]+1 <", "open('dataset_71_8.txt').read().split(',')) d = {0:0} for m in range(1,money+1): min_coin =", "m >= coin: if d[m-coin]+1 < min_coin: min_coin = d[m-coin]+1", "< min_coin: min_coin = d[m-coin]+1 d[m] = min_coin #print d", "[24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1] #coin_list = map(int, open('dataset_71_8.txt').read().split(',')) d =", "= map(int, open('dataset_71_8.txt').read().split(',')) d = {0:0} for m in range(1,money+1):", "in coin_list: if m >= coin: if d[m-coin]+1 < min_coin:", "map(int, open('dataset_71_8.txt').read().split(',')) d = {0:0} for m in range(1,money+1): min_coin", "min_coin: min_coin = d[m-coin]+1 d[m] = min_coin #print d print", "money = 8074 #money = 18705 #coin_list = [24,23,21,5,3,1] coin_list", "1000000 for coin in coin_list: if m >= coin: if", "for coin in coin_list: if m >= coin: if d[m-coin]+1", "8074 #money = 18705 #coin_list = [24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1]", "= [24,13,12,7,5,3,1] #coin_list = map(int, open('dataset_71_8.txt').read().split(',')) d = {0:0} for", "coin: if d[m-coin]+1 < min_coin: min_coin = d[m-coin]+1 d[m] =", "range(1,money+1): min_coin = 1000000 for coin in coin_list: if m", "#coin_list = map(int, open('dataset_71_8.txt').read().split(',')) d = {0:0} for m in", ">= coin: if d[m-coin]+1 < min_coin: min_coin = d[m-coin]+1 d[m]", "for m in range(1,money+1): min_coin = 1000000 for coin in", "18705 #coin_list = [24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1] #coin_list = map(int,", "min_coin = d[m-coin]+1 d[m] = min_coin #print d print d[money]", "#money = 18705 #coin_list = [24,23,21,5,3,1] coin_list = [24,13,12,7,5,3,1] #coin_list", "{0:0} for m in range(1,money+1): min_coin = 1000000 for coin", "d[m-coin]+1 < min_coin: min_coin = d[m-coin]+1 d[m] = min_coin #print", "in range(1,money+1): min_coin = 1000000 for coin in coin_list: if", "if m >= coin: if d[m-coin]+1 < min_coin: min_coin =", "= {0:0} for m in range(1,money+1): min_coin = 1000000 for", "coin_list = [24,13,12,7,5,3,1] #coin_list = map(int, open('dataset_71_8.txt').read().split(',')) d = {0:0}", "m in range(1,money+1): min_coin = 1000000 for coin in coin_list:", "coin_list: if m >= coin: if d[m-coin]+1 < min_coin: min_coin" ]
[ "file. \"\"\" from textx import metamodel_for_language from txbibtex import bibentry_str", "\"\"\" from textx import metamodel_for_language from txbibtex import bibentry_str BIB_FILE", "metamodel_for_language from txbibtex import bibentry_str BIB_FILE = 'references.bib' bibfile =", "Remove comments from bib file. \"\"\" from textx import metamodel_for_language", "Drop line comments. print('\\n'.join([bibentry_str(e) for e in bibfile.entries if e.__class__.__name__", "comments. print('\\n'.join([bibentry_str(e) for e in bibfile.entries if e.__class__.__name__ != 'BibLineComment']))", "metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments. print('\\n'.join([bibentry_str(e) for e in bibfile.entries", "\"\"\" Remove comments from bib file. \"\"\" from textx import", "import metamodel_for_language from txbibtex import bibentry_str BIB_FILE = 'references.bib' bibfile", "textx import metamodel_for_language from txbibtex import bibentry_str BIB_FILE = 'references.bib'", "'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments. print('\\n'.join([bibentry_str(e) for", "bib file. \"\"\" from textx import metamodel_for_language from txbibtex import", "from txbibtex import bibentry_str BIB_FILE = 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE)", "= metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments. print('\\n'.join([bibentry_str(e) for e in", "line comments. print('\\n'.join([bibentry_str(e) for e in bibfile.entries if e.__class__.__name__ !=", "bibentry_str BIB_FILE = 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line", "BIB_FILE = 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments.", "comments from bib file. \"\"\" from textx import metamodel_for_language from", "# Drop line comments. print('\\n'.join([bibentry_str(e) for e in bibfile.entries if", "from textx import metamodel_for_language from txbibtex import bibentry_str BIB_FILE =", "= 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments. print('\\n'.join([bibentry_str(e)", "import bibentry_str BIB_FILE = 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop", "from bib file. \"\"\" from textx import metamodel_for_language from txbibtex", "txbibtex import bibentry_str BIB_FILE = 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) #", "bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments. print('\\n'.join([bibentry_str(e) for e" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from", "create a Google Compute Engine Group Placement Resource policy with", "Group Placement Resource policy with 2 VMs and 2 availability", "coding: utf-8 -*- # # Copyright 2019 Google LLC. All", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "resource policy command.\"\"\" from __future__ import absolute_import from __future__ import", "def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref", "Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args):", "util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service", "distributed under the License is distributed on an \"AS IS\"", "utils as compute_api from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import", "service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement):", "Resource policy with 2 VMs and 2 availability domains, run:", "the specific language governing permissions and # limitations under the", "import util def _CommonArgs(parser, api_version): \"\"\"A helper function to build", "\"\"\"Create a Google Compute Engine Group Placement Resource Policy.\"\"\" @staticmethod", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "base_classes from googlecloudsdk.api_lib.compute import utils as compute_api from googlecloudsdk.api_lib.util import", "class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google Compute Engine Group Placement Resource", "api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION': \"\"\"\\ Create a Google Compute", "\"\"\", 'EXAMPLES': \"\"\"\\ To create a Google Compute Engine Group", "under the License. \"\"\"Create resource policy command.\"\"\" from __future__ import", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "apis from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import flags as", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy", "Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client", "a Google Compute Engine Group Placement Resource policy with 2", "from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute", "Engine Group Placement Resource policy with 2 VMs and 2", "Copyright 2019 Google LLC. All Rights Reserved. # # Licensed", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "Google Compute Engine Group Placement Resource Policy.\"\"\" @staticmethod def Args(parser):", "import division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes", "not use this file except in compliance with the License.", "governing permissions and # limitations under the License. \"\"\"Create resource", "client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google Compute", "LLC. All Rights Reserved. # # Licensed under the Apache", "googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import", "writing, software # distributed under the License is distributed on", "= holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest(", "_CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client =", "in writing, software # distributed under the License is distributed", "you may not use this file except in compliance with", "@staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder =", "\"\"\"\\ Create a Google Compute Engine Group Placement Resource Policy.", "util def _CommonArgs(parser, api_version): \"\"\"A helper function to build args", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client))", "googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser, api_version): \"\"\"A helper function to", "on different API version.\"\"\" messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser)", "messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google Compute Engine", "googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import utils as compute_api from", "Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, 
api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self,", "def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack())", "holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy,", "Engine Group Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION)", "'DESCRIPTION': \"\"\"\\ Create a Google Compute Engine Group Placement Resource", "2 availability domains, run: $ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2", "holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args,", "use this file except in compliance with the License. #", "policy command.\"\"\" from __future__ import absolute_import from __future__ import division", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "messages = holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request =", "resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0]", "import flags from googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser, api_version): \"\"\"A", "'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google Compute Engine", "googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser, api_version):", "messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA)", "@base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google Compute Engine Group Placement", "-*- # # Copyright 2019 Google LLC. All Rights Reserved.", "\"\"\"A helper function to build args based on different API", "CONDITIONS OF ANY KIND, either express or implied. # See", "import base_classes from googlecloudsdk.api_lib.compute import utils as compute_api from googlecloudsdk.api_lib.util", "Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION':", "availability domains, run: $ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2 \"\"\"", "= util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region)", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. 
# # Licensed under the Apache License, Version", "{ 'DESCRIPTION': \"\"\"\\ Create a Google Compute Engine Group Placement", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA)", "region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class", "Create a Google Compute Engine Group Placement Resource Policy. \"\"\",", "Google Compute Engine Group Placement Resource policy with 2 VMs", "__future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "# # Copyright 2019 Google LLC. All Rights Reserved. #", "specific language governing permissions and # limitations under the License.", "different API version.\"\"\" messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser,", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"\"\"Create resource policy command.\"\"\" from __future__ import absolute_import from __future__", "= holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages =", "License for the specific language governing permissions and # limitations", "to build args based on different API version.\"\"\" messages =", "a Google Compute Engine Group Placement Resource Policy.\"\"\" @staticmethod def", "def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION': \"\"\"\\ Create", "Compute Engine Group Placement Resource policy with 2 VMs and", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute", "flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref,", "limitations under the License. \"\"\"Create resource policy command.\"\"\" from __future__", "Placement Resource policy with 2 VMs and 2 availability domains,", "Placement Resource Policy. \"\"\", 'EXAMPLES': \"\"\"\\ To create a Google", "= flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy =", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "with 2 VMs and 2 availability domains, run: $ {command}", "absolute_import from __future__ import division from __future__ import unicode_literals from", "scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request", "base from googlecloudsdk.command_lib.compute import flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import", "Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help =", "googlecloudsdk.command_lib.compute import flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags from", "the License for the specific language governing permissions and #", "Compute Engine Group Placement Resource Policy. \"\"\", 'EXAMPLES': \"\"\"\\ To", "import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import utils", "import apis from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import flags", "(the \"License\"); # you may not use this file except", "-*- coding: utf-8 -*- # # Copyright 2019 Google LLC.", "Apache License, Version 2.0 (the \"License\"); # you may not", "helper function to build args based on different API version.\"\"\"", "# you may not use this file except in compliance", "import utils as compute_api from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope", "either express or implied. # See the License for the", "apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand):", "import absolute_import from __future__ import division from __future__ import unicode_literals", "and # limitations under the License. \"\"\"Create resource policy command.\"\"\"", "flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "Engine Group Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION)", "flags from googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser, api_version): \"\"\"A helper", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a", "@staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION': \"\"\"\\", "unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import utils as", "the License is distributed on an \"AS IS\" BASIS, #", "from __future__ import absolute_import from __future__ import division from __future__", "flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies import", "Group Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help", "Group Placement Resource Policy. \"\"\", 'EXAMPLES': \"\"\"\\ To create a", "utf-8 -*- # # Copyright 2019 Google LLC. All Rights", "in compliance with the License. # You may obtain a", "Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = {", "Compute Engine Group Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser,", "VMs and 2 availability domains, run: $ {command} my-resource-policy --region=REGION", "software # distributed under the License is distributed on an", "parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google Compute Engine Group", "import flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies", "from googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser, api_version): \"\"\"A helper function", "import base from googlecloudsdk.command_lib.compute import flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies", "version.\"\"\" messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None)", "from googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser,", "from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import utils as compute_api", "'EXAMPLES': \"\"\"\\ To create a Google Compute Engine Group Placement", "args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource(", "# # Unless required by applicable law or agreed to", "Policy. 
\"\"\", 'EXAMPLES': \"\"\"\\ To create a Google Compute Engine", "= { 'DESCRIPTION': \"\"\"\\ Create a Google Compute Engine Group", "resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project,", "return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "__future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import", "# -*- coding: utf-8 -*- # # Copyright 2019 Google", "Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder", "Version 2.0 (the \"License\"); # you may not use this", "holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages)", "a Google Compute Engine Group Placement Resource Policy. \"\"\", 'EXAMPLES':", "law or agreed to in writing, software # distributed under", "api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create", "create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return", "api_version): \"\"\"A helper function to build args based on different", "Google LLC. All Rights Reserved. # # Licensed under the", "CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google Compute Engine Group Placement Resource Policy.\"\"\"", "and 2 availability domains, run: $ {command} my-resource-policy --region=REGION --vm-count=2", "compute_api from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import base from", "_CommonArgs(parser, api_version): \"\"\"A helper function to build args based on", "permissions and # limitations under the License. \"\"\"Create resource policy", "from googlecloudsdk.api_lib.compute import utils as compute_api from googlecloudsdk.api_lib.util import apis", "= apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class", "from __future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute", "based on different API version.\"\"\" messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser)", "__future__ import absolute_import from __future__ import division from __future__ import", "_CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION': \"\"\"\\ Create a Google", "\"\"\"\\ To create a Google Compute Engine Group Placement Resource", "implied. 
# See the License for the specific language governing", "build args based on different API version.\"\"\" messages = apis.GetMessagesModule('compute',", "# limitations under the License. \"\"\"Create resource policy command.\"\"\" from", "under the Apache License, Version 2.0 (the \"License\"); # you", "Engine Group Placement Resource Policy. \"\"\", 'EXAMPLES': \"\"\"\\ To create", "\"License\"); # you may not use this file except in", "2 VMs and 2 availability domains, run: $ {command} my-resource-policy", "googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import flags as compute_flags from", "CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google Compute Engine Group Placement Resource Policy.\"\"\"", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "License. \"\"\"Create resource policy command.\"\"\" from __future__ import absolute_import from", "as compute_api from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import base", "CreateGroupPlacement.detailed_help = { 'DESCRIPTION': \"\"\"\\ Create a Google Compute Engine", "the License. \"\"\"Create resource policy command.\"\"\" from __future__ import absolute_import", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "To create a Google Compute Engine Group Placement Resource policy", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "from googlecloudsdk.command_lib.compute import flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags", "Resource Policy. \"\"\", 'EXAMPLES': \"\"\"\\ To create a Google Compute", "from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import flags as compute_flags", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies import util", "holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages", "Google Compute Engine Group Placement Resource Policy. 
\"\"\", 'EXAMPLES': \"\"\"\\", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "API version.\"\"\" messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages)", "to in writing, software # distributed under the License is", "messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies", "@base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google Compute Engine Group Placement", "api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client", "2019 Google LLC. All Rights Reserved. # # Licensed under", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "googlecloudsdk.api_lib.compute import utils as compute_api from googlecloudsdk.api_lib.util import apis from", "class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google Compute Engine Group Placement Resource", "args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args,", "language governing permissions and # limitations under the License. \"\"\"Create", "args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service =", "Group Placement Resource Policy.\"\"\" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def", "You may obtain a copy of the License at #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref =", "= messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service,", "create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create a Google Compute Engine Group", "command.\"\"\" from __future__ import absolute_import from __future__ import division from", "required by applicable law or agreed to in writing, software", "flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google", "domains, run: $ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2 \"\"\" }", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# Copyright 2019 Google LLC. All Rights Reserved. 
# #", "args based on different API version.\"\"\" messages = apis.GetMessagesModule('compute', api_version)", "client = holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages", "= holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): \"\"\"Create", "with the License. # You may obtain a copy of", "def _CommonArgs(parser, api_version): \"\"\"A helper function to build args based", "this file except in compliance with the License. # You", "= base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources,", "the Apache License, Version 2.0 (the \"License\"); # you may", "policy with 2 VMs and 2 availability domains, run: $", "flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): \"\"\"Create a Google Compute", "compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies import util def", "messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert',", "Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION': \"\"\"\\ Create a", "function to build args based on different API version.\"\"\" messages" ]
[ "\"\"\"Parse a file containing researchers.\"\"\" try: with open(filename, \"r\") as", "filename=filename) def PapersFile(filename, researchers=None): \"\"\"Parse a file containing papers.\"\"\" try:", "<gh_stars>10-100 import json from .papers import Papers from .researchers import", "containing researchers.\"\"\" try: with open(filename, \"r\") as file: data =", "Researchers(data, filename=filename) def PapersFile(filename, researchers=None): \"\"\"Parse a file containing papers.\"\"\"", ".researchers import Researchers def ResearchersFile(filename): \"\"\"Parse a file containing researchers.\"\"\"", "json.load(file) except FileNotFoundError: data = {} return Researchers(data, filename=filename) def", "data = json.load(file) except FileNotFoundError: data = {} return Researchers(data,", "import json from .papers import Papers from .researchers import Researchers", "file: data = json.load(file) except FileNotFoundError: data = {} return", "ResearchersFile(filename): \"\"\"Parse a file containing researchers.\"\"\" try: with open(filename, \"r\")", "{} return Researchers(data, filename=filename) def PapersFile(filename, researchers=None): \"\"\"Parse a file", "as file: data = json.load(file) except FileNotFoundError: data = {}", "def PapersFile(filename, researchers=None): \"\"\"Parse a file containing papers.\"\"\" try: with", "papers.\"\"\" try: with open(filename, \"r\") as file: data = json.load(file)", "containing papers.\"\"\" try: with open(filename, \"r\") as file: data =", "except FileNotFoundError: data = {} return Researchers(data, filename=filename) def PapersFile(filename,", "\"r\") as file: data = json.load(file) except FileNotFoundError: data =", "open(filename, \"r\") as file: data = json.load(file) except FileNotFoundError: data", "researchers=None): \"\"\"Parse a file containing papers.\"\"\" try: with open(filename, \"r\")", "= json.load(file) except FileNotFoundError: data = {} return Papers(data, filename=filename,", "with open(filename, \"r\") as file: data = json.load(file) except FileNotFoundError:", "import Researchers def ResearchersFile(filename): \"\"\"Parse a file containing researchers.\"\"\" try:", ".papers import Papers from .researchers import Researchers def ResearchersFile(filename): \"\"\"Parse", "from .papers import Papers from .researchers import Researchers def ResearchersFile(filename):", "data = {} return Researchers(data, filename=filename) def PapersFile(filename, researchers=None): \"\"\"Parse", "from .researchers import Researchers def ResearchersFile(filename): \"\"\"Parse a file containing", "a file containing papers.\"\"\" try: with open(filename, \"r\") as file:", "FileNotFoundError: data = {} return Researchers(data, filename=filename) def PapersFile(filename, researchers=None):", "file containing papers.\"\"\" try: with open(filename, \"r\") as file: data", "\"\"\"Parse a file containing papers.\"\"\" try: with open(filename, \"r\") as", "researchers.\"\"\" try: with open(filename, \"r\") as file: data = json.load(file)", "json from .papers import Papers from .researchers import Researchers def", "return Researchers(data, filename=filename) def PapersFile(filename, researchers=None): \"\"\"Parse a file containing", "def ResearchersFile(filename): \"\"\"Parse a file containing researchers.\"\"\" try: with open(filename,", "import Papers from .researchers import Researchers def ResearchersFile(filename): \"\"\"Parse a", "try: with open(filename, \"r\") as file: data = json.load(file) except", "a file containing researchers.\"\"\" try: with 
open(filename, \"r\") as file:", "= json.load(file) except FileNotFoundError: data = {} return Researchers(data, filename=filename)", "PapersFile(filename, researchers=None): \"\"\"Parse a file containing papers.\"\"\" try: with open(filename,", "Papers from .researchers import Researchers def ResearchersFile(filename): \"\"\"Parse a file", "file containing researchers.\"\"\" try: with open(filename, \"r\") as file: data", "json.load(file) except FileNotFoundError: data = {} return Papers(data, filename=filename, researchers=researchers)", "= {} return Researchers(data, filename=filename) def PapersFile(filename, researchers=None): \"\"\"Parse a", "Researchers def ResearchersFile(filename): \"\"\"Parse a file containing researchers.\"\"\" try: with", "data = json.load(file) except FileNotFoundError: data = {} return Papers(data," ]
[ "2017 SiteWare Corp. All right reserved ############################################################################# import logging import", "Corp. All right reserved ############################################################################# import logging import pytest from", "addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert", "address assert address >= addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload()", "import pytest from . import addresses def test_pytest(): assert True", "address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo')", "addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not addresses.cidr_db.has('test_test_foo') addresses.cidr_db.reload()", "assert address >= addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert", "logging import pytest from . import addresses def test_pytest(): assert", "test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address >= addresses.cidr_db.first_address()", "address >= addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo')", "All right reserved ############################################################################# import logging import pytest from .", "############################################################################# import logging import pytest from . import addresses def", "import logging import pytest from . import addresses def test_pytest():", "from . import addresses def test_pytest(): assert True def test_object_exists():", "reserved ############################################################################# import logging import pytest from . import addresses", "pytest from . import addresses def test_pytest(): assert True def", "addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address", "addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not", "<reponame>zachkont/sd2 ############################################################################# # Copyright (c) 2017 SiteWare Corp. All right", "addresses.cidr_db def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address", "right reserved ############################################################################# import logging import pytest from . 
import", "def test_object_exists(): assert addresses.cidr_db def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert", "def test_pytest(): assert True def test_object_exists(): assert addresses.cidr_db def test_new_address():", "<= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo')", "Copyright (c) 2017 SiteWare Corp. All right reserved ############################################################################# import", "SiteWare Corp. All right reserved ############################################################################# import logging import pytest", "addresses def test_pytest(): assert True def test_object_exists(): assert addresses.cidr_db def", "# Copyright (c) 2017 SiteWare Corp. All right reserved #############################################################################", "assert addresses.cidr_db def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert", "True def test_object_exists(): assert addresses.cidr_db def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo')", "== address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not addresses.cidr_db.has('test_test_foo') addresses.cidr_db.reload() assert", "############################################################################# # Copyright (c) 2017 SiteWare Corp. All right reserved", "= addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address >= addresses.cidr_db.first_address() assert address", "assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not addresses.cidr_db.has('test_test_foo')", "(c) 2017 SiteWare Corp. All right reserved ############################################################################# import logging", "assert address assert address >= addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address()", "test_pytest(): assert True def test_object_exists(): assert addresses.cidr_db def test_new_address(): address", "addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address >= addresses.cidr_db.first_address() assert address <=", "address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address >= addresses.cidr_db.first_address() assert", "assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not addresses.cidr_db.has('test_test_foo') addresses.cidr_db.reload() assert not addresses.cidr_db.has('test_test_foo')", ". 
import addresses def test_pytest(): assert True def test_object_exists(): assert", "test_object_exists(): assert addresses.cidr_db def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address", "def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address >=", "assert True def test_object_exists(): assert addresses.cidr_db def test_new_address(): address =", ">= addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') ==", "import addresses def test_pytest(): assert True def test_object_exists(): assert addresses.cidr_db", "address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not addresses.cidr_db.has('test_test_foo') addresses.cidr_db.reload() assert not", "assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert" ]
[ "{ 'optimizer': { 'type': 'AdamOptimizer', 'kwargs': { 'beta1': 0.9, 'beta2':", "as tx beam_width = 5 hidden_dim = 768 bert =", "'initializer': { 'type': 'variance_scaling_initializer', 'kwargs': { 'scale': 1.0, 'mode': 'fan_avg',", "** -0.5), # The warmup steps for the 'aiayn' and", "} # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45", "# The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning", "{ 'beta1': 0.9, 'beta2': 0.997, 'epsilon': 1e-9 } } }", "# From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for BERT decoder =", "- 'static' -> A simple static learning rate, specified by", "(hidden_dim ** -0.5), # The warmup steps for the 'aiayn'", "tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9 opt = { 'optimizer': {", "'static_lr' # - 'aiayn' -> The learning rate used in", "# The static learning rate, when 'static' is used. 'static_lr':", "'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention': { 'num_heads': 8, 'output_dim': hidden_dim", "# - 'aiayn' -> The learning rate used in the", "learning rate, specified by 'static_lr' # - 'aiayn' -> The", "3 values: # - 'static' -> A simple static learning", "the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2 * (hidden_dim ** -0.5),", "hidden_dim = 768 bert = { 'pretrained_model_name': 'bert-base-uncased' } #", "{} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for BERT decoder", "{ 'num_heads': 8, 'output_dim': hidden_dim }, 'initializer': { 'type': 'variance_scaling_initializer',", "= { 'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention': { 'num_heads': 8,", "need\" paper. # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for", "- 'aiayn' -> The learning rate used in the \"Attention", "by 'static_lr' # - 'aiayn' -> The learning rate used", "be applied to the 'aiayn' learning rate. 'aiayn_multiplier': 0.2 }", "following 3 values: # - 'static' -> A simple static", "texar.tf as tx beam_width = 5 hidden_dim = 768 bert", "'beta1': 0.9, 'beta2': 0.997, 'epsilon': 1e-9 } } } lr", "https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments", "'kwargs': { 'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', }, },", "'type': 'variance_scaling_initializer', 'kwargs': { 'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform',", "# - 'static' -> A simple static learning rate, specified", "learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant':", "is all you need\" paper. 
# - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The", "constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2 *", "hidden_dim }, 'initializer': { 'type': 'variance_scaling_initializer', 'kwargs': { 'scale': 1.0,", "'aiayn' -> The learning rate used in the \"Attention is", "'kwargs': { 'beta1': 0.9, 'beta2': 0.997, 'epsilon': 1e-9 } }", "{ 'pretrained_model_name': 'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {}", "can be applied to the 'aiayn' learning rate. 'aiayn_multiplier': 0.2", "5 hidden_dim = 768 bert = { 'pretrained_model_name': 'bert-base-uncased' }", "The static learning rate, when 'static' is used. 'static_lr': 1e-3,", "'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2 * (hidden_dim ** -0.5), #", "lr = { # The 'learning_rate_schedule' can have the following", "paper. # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's", "'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example 'learning_rate_schedule':", "The learning rate for Texar's Transformer example 'learning_rate_schedule': 'aiayn', #", "A multiplier that can be applied to the 'aiayn' learning", "simple static learning rate, specified by 'static_lr' # - 'aiayn'", "The 'learning_rate_schedule' can have the following 3 values: # -", "rate, when 'static' is used. 'static_lr': 1e-3, # A multiplier", "rate 'lr_constant': 2 * (hidden_dim ** -0.5), # The warmup", "'epsilon': 1e-9 } } } lr = { # The", "is used. 'static_lr': 1e-3, # A multiplier that can be", "A simple static learning rate, specified by 'static_lr' # -", "-> The learning rate used in the \"Attention is all", "and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000, # The static learning", "}, 'initializer': { 'type': 'variance_scaling_initializer', 'kwargs': { 'scale': 1.0, 'mode':", "specified by 'static_lr' # - 'aiayn' -> The learning rate", "'variance_scaling_initializer', 'kwargs': { 'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', },", "static learning rate, when 'static' is used. 'static_lr': 1e-3, #", "2 * (hidden_dim ** -0.5), # The warmup steps for", "The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate", "The learning rate used in the \"Attention is all you", "# See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 #", "learning rate, when 'static' is used. 
'static_lr': 1e-3, # A", "'learning_rate_schedule': 'aiayn', # The learning rate constant used for the", "# The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning", "'num_heads': 8, 'output_dim': hidden_dim }, 'initializer': { 'type': 'variance_scaling_initializer', 'kwargs':", "multiplier that can be applied to the 'aiayn' learning rate.", "used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2 * (hidden_dim", "0.9, 'beta2': 0.997, 'epsilon': 1e-9 } } } lr =", "for BERT decoder = { 'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention':", "for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2 * (hidden_dim **", "values: # - 'static' -> A simple static learning rate,", "rate used in the \"Attention is all you need\" paper.", "\"Attention is all you need\" paper. # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' ->", "learning rate 'warmup_steps': 4000, # The static learning rate, when", "{ 'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention': { 'num_heads': 8, 'output_dim':", "rate for Texar's Transformer example 'learning_rate_schedule': 'aiayn', # The learning", "} } } lr = { # The 'learning_rate_schedule' can", "# A multiplier that can be applied to the 'aiayn'", "* (hidden_dim ** -0.5), # The warmup steps for the", "'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim)", "https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for BERT decoder = { 'dim':", "} } lr = { # The 'learning_rate_schedule' can have", "'fan_avg', 'distribution': 'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence =", "768 bert = { 'pretrained_model_name': 'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams", "'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9 opt", "the following 3 values: # - 'static' -> A simple", "= { 'pretrained_model_name': 'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder =", "0.9 opt = { 'optimizer': { 'type': 'AdamOptimizer', 'kwargs': {", "# The 'learning_rate_schedule' can have the following 3 values: #", "hidden_dim, 'num_blocks': 6, 'multihead_attention': { 'num_heads': 8, 'output_dim': hidden_dim },", "for Texar's Transformer example 'learning_rate_schedule': 'aiayn', # The learning rate", "'num_blocks': 6, 'multihead_attention': { 'num_heads': 8, 'output_dim': hidden_dim }, 'initializer':", "'output_dim': hidden_dim }, 'initializer': { 'type': 'variance_scaling_initializer', 'kwargs': { 'scale':", "6, 'multihead_attention': { 'num_heads': 8, 'output_dim': hidden_dim }, 'initializer': {", "-0.5), # The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth'", "static learning rate, specified by 'static_lr' # - 'aiayn' ->", "the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000, # The", "0.997, 'epsilon': 1e-9 } } } lr = { #", "decoder = { 'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention': { 'num_heads':", "}, }, 'poswise_feedforward': 
tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9 opt =", "rate, specified by 'static_lr' # - 'aiayn' -> The learning", "'learning_rate_schedule' can have the following 3 values: # - 'static'", "example 'learning_rate_schedule': 'aiayn', # The learning rate constant used for", "adjustments for BERT decoder = { 'dim': hidden_dim, 'num_blocks': 6,", "rate 'warmup_steps': 4000, # The static learning rate, when 'static'", "warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps':", "1e-9 } } } lr = { # The 'learning_rate_schedule'", "'lr_constant': 2 * (hidden_dim ** -0.5), # The warmup steps", "'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000, # The static", "1e-3, # A multiplier that can be applied to the", "'beta2': 0.997, 'epsilon': 1e-9 } } } lr = {", "loss_label_confidence = 0.9 opt = { 'optimizer': { 'type': 'AdamOptimizer',", "{ 'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', }, }, 'poswise_feedforward':", "tx beam_width = 5 hidden_dim = 768 bert = {", "rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2", "'static_lr': 1e-3, # A multiplier that can be applied to", "<filename>config_model.py import texar.tf as tx beam_width = 5 hidden_dim =", "= { # The 'learning_rate_schedule' can have the following 3", "when 'static' is used. 'static_lr': 1e-3, # A multiplier that", "= {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for BERT", "'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000, # The static learning rate,", "the \"Attention is all you need\" paper. # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth'", "4000, # The static learning rate, when 'static' is used.", "}, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9 opt = {", "} loss_label_confidence = 0.9 opt = { 'optimizer': { 'type':", "From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for BERT decoder = {", "= { 'optimizer': { 'type': 'AdamOptimizer', 'kwargs': { 'beta1': 0.9,", "= 0.9 opt = { 'optimizer': { 'type': 'AdamOptimizer', 'kwargs':", "- 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example", "'mode': 'fan_avg', 'distribution': 'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence", "used. 'static_lr': 1e-3, # A multiplier that can be applied", "{ 'type': 'AdamOptimizer', 'kwargs': { 'beta1': 0.9, 'beta2': 0.997, 'epsilon':", "'distribution': 'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9", "'optimizer': { 'type': 'AdamOptimizer', 'kwargs': { 'beta1': 0.9, 'beta2': 0.997,", "you need\" paper. 
# - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate", "'AdamOptimizer', 'kwargs': { 'beta1': 0.9, 'beta2': 0.997, 'epsilon': 1e-9 }", "BERT decoder = { 'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention': {", "Texar's Transformer example 'learning_rate_schedule': 'aiayn', # The learning rate constant", "bert = { 'pretrained_model_name': 'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder", "'warmup_steps': 4000, # The static learning rate, when 'static' is", "'pretrained_model_name': 'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} #", "in the \"Attention is all you need\" paper. # -", "= 768 bert = { 'pretrained_model_name': 'bert-base-uncased' } # See", "{ # The 'learning_rate_schedule' can have the following 3 values:", "learning rate for Texar's Transformer example 'learning_rate_schedule': 'aiayn', # The", "'type': 'AdamOptimizer', 'kwargs': { 'beta1': 0.9, 'beta2': 0.997, 'epsilon': 1e-9", "learning rate used in the \"Attention is all you need\"", "-> The learning rate for Texar's Transformer example 'learning_rate_schedule': 'aiayn',", "The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate", "for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000, #", "Transformer example 'learning_rate_schedule': 'aiayn', # The learning rate constant used", "8, 'output_dim': hidden_dim }, 'initializer': { 'type': 'variance_scaling_initializer', 'kwargs': {", "} lr = { # The 'learning_rate_schedule' can have the", "beam_width = 5 hidden_dim = 768 bert = { 'pretrained_model_name':", "have the following 3 values: # - 'static' -> A", "1.0, 'mode': 'fan_avg', 'distribution': 'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) }", "# with adjustments for BERT decoder = { 'dim': hidden_dim,", "'static' -> A simple static learning rate, specified by 'static_lr'", "opt = { 'optimizer': { 'type': 'AdamOptimizer', 'kwargs': { 'beta1':", "can have the following 3 values: # - 'static' ->", "-> A simple static learning rate, specified by 'static_lr' #", "that can be applied to the 'aiayn' learning rate. 'aiayn_multiplier':", "learning rate 'lr_constant': 2 * (hidden_dim ** -0.5), # The", "See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with", "with adjustments for BERT decoder = { 'dim': hidden_dim, 'num_blocks':", "= 5 hidden_dim = 768 bert = { 'pretrained_model_name': 'bert-base-uncased'", "'static' is used. 'static_lr': 1e-3, # A multiplier that can", "import texar.tf as tx beam_width = 5 hidden_dim = 768", "used in the \"Attention is all you need\" paper. 
#", "bert_encoder = {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for", "'multihead_attention': { 'num_heads': 8, 'output_dim': hidden_dim }, 'initializer': { 'type':", "'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9 opt = { 'optimizer':", "'aiayn', # The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth'", "all you need\" paper. # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning", "steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000,", "{ 'type': 'variance_scaling_initializer', 'kwargs': { 'scale': 1.0, 'mode': 'fan_avg', 'distribution':", "# - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer", "'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} # From" ]
[ "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wishes',", "'0004_auto_20201029_0857'), ] operations = [ migrations.AlterField( model_name='gallery', name='image', field=models.FilePathField(path='/images'), ),", "Generated by Django 3.1.2 on 2020-10-29 09:04 from django.db import", "= [ ('wishes', '0004_auto_20201029_0857'), ] operations = [ migrations.AlterField( model_name='gallery',", "dependencies = [ ('wishes', '0004_auto_20201029_0857'), ] operations = [ migrations.AlterField(", "3.1.2 on 2020-10-29 09:04 from django.db import migrations, models class", "by Django 3.1.2 on 2020-10-29 09:04 from django.db import migrations,", "class Migration(migrations.Migration): dependencies = [ ('wishes', '0004_auto_20201029_0857'), ] operations =", "Migration(migrations.Migration): dependencies = [ ('wishes', '0004_auto_20201029_0857'), ] operations = [", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "Django 3.1.2 on 2020-10-29 09:04 from django.db import migrations, models", "[ ('wishes', '0004_auto_20201029_0857'), ] operations = [ migrations.AlterField( model_name='gallery', name='image',", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('wishes', '0004_auto_20201029_0857'),", "# Generated by Django 3.1.2 on 2020-10-29 09:04 from django.db", "] operations = [ migrations.AlterField( model_name='gallery', name='image', field=models.FilePathField(path='/images'), ), ]", "on 2020-10-29 09:04 from django.db import migrations, models class Migration(migrations.Migration):", "models class Migration(migrations.Migration): dependencies = [ ('wishes', '0004_auto_20201029_0857'), ] operations", "migrations, models class Migration(migrations.Migration): dependencies = [ ('wishes', '0004_auto_20201029_0857'), ]", "('wishes', '0004_auto_20201029_0857'), ] operations = [ migrations.AlterField( model_name='gallery', name='image', field=models.FilePathField(path='/images'),", "09:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "2020-10-29 09:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies" ]
[ "\") raise arcpy.ExecuteError() #determing if year is leap year and", "if BandNum in ThermBands: Refraster=1282.71/(arcpy.sa.Ln((666.09/Radraster)+1.0)) BandPath=\"{0}\\\\{1}_B{2}_Temperature.tif\".format(OutputFolder,TileName,BandNum) arcpy.AddMessage(\"Proceeded through if\") #Otherwise", "# Author: <NAME> <EMAIL> # NASA DEVELOP Program # Created:", "#this info determines the solar exoatmospheric irradiance (ESun) for each", "digital numbers to Radiance, Reflectance, or Temperature (if using Band", "{0}\".format(BandNum)) print \"Reflectance Calculated for Band {0}\".format(BandNum) f.close() return OutList", "\",\"QUANTIZE_CAL_MIN_BAND_{0} = \"] oldMeta=['BAND1_FILE_NAME = \"',\"ACQUISITION_DATE = \",\"SUN_ELEVATION = \",", "from digital numbers # to Radiance, Reflectance, or Temperature #", "Radraster.save(\"{0}\\\\{1}_B{2}_Radiance.tif\".format(OutputFolder,TileName,BandNum)) Radraster=0 elif OutputType==\"Reflectance/Temperature\": #Calculating temperature for band 6 if", ") SZA=90.-float(MText.split(Meta[2])[1].split(\"\\n\")[0]) #Calculating values for each band for pathname in", "file that is downloaded with the Landsat Bands themselves. This", "will be used to parse the meta data text file", "info determines the solar exoatmospheric irradiance (ESun) for each band", "Reflectance for spectral bands and Temperature in Kelvin for Thermal", "Name: Landsat Digital Numbers to Radiance/Reflectance # Purpose: To convert", "of arcpy raster objects in a sequence that mirrors that", "#These lists will be used to parse the meta data", "math.pi * Radraster * dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) )", "OutputType==\"Radiance\": Radraster.save(\"{0}\\\\{1}_B{2}_Radiance.tif\".format(OutputFolder,TileName,BandNum)) Radraster=0 elif OutputType==\"Reflectance/Temperature\": #Calculating temperature for band 6", "29, 2012. This tool can process either the new or", "-----Inputs------ Lbands: GeoTIFF files containing individual bands of Landsat imagery.", "Band {0}\".format(BandNum)) print \"Reflectance Calculated for Band {0}\".format(BandNum) f.close() return", "Meta=newMeta Band6length=8 #The tilename is located using the newMeta/oldMeta indixes", "Numbers to Radiance/Reflectance # Purpose: To convert landsat 4,5, or", "meta data text file and locate relevant information #metadata format", "can process either the new or old format newMeta=['LANDSAT_SCENE_ID =", "= \",\"QUANTIZE_CAL_MIN_BAND_{0} = \"] oldMeta=['BAND1_FILE_NAME = \"',\"ACQUISITION_DATE = \",\"SUN_ELEVATION =", "located using the newMeta/oldMeta indixes and the date of capture", "band will be saved as an individual GeoTIFF file and", "used to identify old metadata #if this is not present,", "Band 6 name to match metadata if BandNum==\"6\" and spacecraft[8]==\"7\":", "scene. MetaData: The metadata text file that is downloaded with", "= \"',\"ACQUISITION_DATE = \",\"SUN_ELEVATION = \", \"LMAX_BAND{0} = \",\"LMIN_BAND{0} =", "year is leap year and setting the Days in year", "using the newMeta/oldMeta indixes and the date of capture is", "Band6length=8 #The tilename is located using the newMeta/oldMeta indixes and", "from which the imagery was capture is identified #this info", "or Temperature # Author: <NAME> <EMAIL> # NASA DEVELOP Program", "individual bands of Landsat imagery. 
These must have the original", "or Temperature (if using Band 6) -----Inputs------ Lbands: GeoTIFF files", "BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0:2] try: int(BandNum) except: BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0] except: msg=\"Error reading Band {0}.", "either the new or old format newMeta=['LANDSAT_SCENE_ID = \"','DATE_ACQUIRED =", "the input Lbands \"\"\" OutList=[] #These lists will be used", "#determing if year is leap year and setting the Days", "PRODUCT_CREATION_TIME category is used to identify old metadata #if this", "= \", \"QCALMAX_BAND{0} = \",\"QCALMIN_BAND{0} = \"] f=open(MetaData) MText=f.read() #the", "must be from a single scene. MetaData: The metadata text", "#the spacecraft from which the imagery was capture is identified", "ThermBands=[\"6\"] if \"7\" in spacecraft: ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0. ,82.07,1368.00) ThermBands=[\"B6_VCID_1\",\"B6_VCID_2\"] elif \"5\"", "= ',\"SUN_ELEVATION = \", \"RADIANCE_MAXIMUM_BAND_{0} = \",\"RADIANCE_MINIMUM_BAND_{0} = \", \"QUANTIZE_CAL_MAX_BAND_{0}", "0.034221*math.cos(theta) + 0.001280*math.sin(theta) + 0.000719*math.cos(2*theta)+ 0.000077*math.sin(2*theta) ) SZA=90.-float(MText.split(Meta[2])[1].split(\"\\n\")[0]) #Calculating values", "6 name string. In the new metadata this string is", "in which to save the output rasters -----Outputs----- A list", "new. #Band6length refers to the length of the Band 6", "7 or 8 \") raise arcpy.ExecuteError() #determing if year is", "in spacecraft: ESun=(1957.0,1826.0,1554.0,1036.0,215.0 ,0. ,80.67) elif \"4\" in spacecraft: ESun=(1957.0,1825.0,1557.0,1033.0,214.9", "ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0. ,80.72) elif \"8\" in spacecraft: ESun=(1857.0,1996.0,1812.0,1516.0,983.3 ,251.8,85.24,0.0,389.3,0.,0.) ThermBands=[\"10\",\"11\"]", "whether the output should be: \"Radiance\" \"Reflectance/Temperature\" - Calculates Reflectance", "for each band spacecraft=MText.split('SPACECRAFT_ID = \"')[1].split('\"')[0] ThermBands=[\"6\"] if \"7\" in", "identify old metadata #if this is not present, the meta", "arcpy.AddMessage(\"Proceeded through else\") if Save==True: Refraster.save(BandPath) OutList.append(arcpy.Raster(BandPath)) else: OutList.append(Refraster) del", "a single scene. MetaData: The metadata text file that is", "the original filename and the output pixel unit *if this", "elif OutputType==\"Reflectance/Temperature\": #Calculating temperature for band 6 if present if", "\"] oldMeta=['BAND1_FILE_NAME = \"',\"ACQUISITION_DATE = \",\"SUN_ELEVATION = \", \"LMAX_BAND{0} =", "in spacecraft: ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0. ,82.07,1368.00) ThermBands=[\"B6_VCID_1\",\"B6_VCID_2\"] elif \"5\" in spacecraft: ESun=(1957.0,1826.0,1554.0,1036.0,215.0", "variable must also be set OutputFolder: Folder in which to", "jday=TileName[17:20] date=MText.split(Meta[1])[1].split('\\n')[0] #the spacecraft from which the imagery was capture", "elif \"4\" in spacecraft: ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0. 
,80.72) elif \"8\" in", "4, 5, 7 or 8 \") raise arcpy.ExecuteError() #determing if", "exoatmospheric irradiance (ESun) for each band spacecraft=MText.split('SPACECRAFT_ID = \"')[1].split('\"')[0] ThermBands=[\"6\"]", "numbers to Radiance, Reflectance, or Temperature (if using Band 6)", "objects in a sequence that mirrors that of the input", "+LMin Oraster=0 if OutputType==\"Radiance\": Radraster.save(\"{0}\\\\{1}_B{2}_Radiance.tif\".format(OutputFolder,TileName,BandNum)) Radraster=0 elif OutputType==\"Reflectance/Temperature\": #Calculating temperature", "from the sun theta =2*math.pi*float(jday)/DIY dSun2 = (1.00011 + 0.034221*math.cos(theta)", "the imagery was capture is identified #this info determines the", "newMeta=['LANDSAT_SCENE_ID = \"','DATE_ACQUIRED = ',\"SUN_ELEVATION = \", \"RADIANCE_MAXIMUM_BAND_{0} = \",\"RADIANCE_MINIMUM_BAND_{0}", "in a sequence that mirrors that of the input Lbands", "#Calculating temperature for band 6 if present if BandNum in", "which to save the output rasters -----Outputs----- A list of", "downloaded.\".format(str(inputbandnum)) arcpy.AddError(msg) print msg raise arcpy.ExecuteError #changing Band 6 name", "#the presence of a PRODUCT_CREATION_TIME category is used to identify", "tool only works for Landsat 4, 5, 7 or 8", "Lbands: try: BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0:2] try: int(BandNum) except: BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0] except: msg=\"Error reading", "also be set OutputFolder: Folder in which to save the", "# Created: 19/10/2012 #------------------------------------------------------------------------------- import arcpy import math arcpy.CheckOutExtension(\"Spatial\") def", "match metadata if BandNum==\"6\" and spacecraft[8]==\"7\": BandNum=pathname.split(\"\\\\\")[-1].split(\"B\")[1][0:Band6length] print \"Processing Band", "output should be: \"Radiance\" \"Reflectance/Temperature\" - Calculates Reflectance for spectral", "Boolean value that indicates whether the output rasters will be", "SZA=90.-float(MText.split(Meta[2])[1].split(\"\\n\")[0]) #Calculating values for each band for pathname in Lbands:", "the min/max for radiance/Digital numbers LMax= float(MText.split(Meta[3].format(BandNum))[1].split(\"\\n\")[0]) LMin= float(MText.split(Meta[4].format(BandNum))[1].split(\"\\n\")[0]) QCalMax=float(MText.split(Meta[5].format(BandNum))[1].split(\"\\n\")[0])", "* dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) ) BandPath=\"{0}\\\\{1}_B{2}_TOA_Reflectance.tif\".format(OutputFolder,TileName,BandNum) arcpy.AddMessage(\"Proceeded through", "rasters will be saved permanantly Each band will be saved", "2012. This tool can process either the new or old", "math.cos(SZA*math.pi/180) ) BandPath=\"{0}\\\\{1}_B{2}_TOA_Reflectance.tif\".format(OutputFolder,TileName,BandNum) arcpy.AddMessage(\"Proceeded through else\") if Save==True: Refraster.save(BandPath) OutList.append(arcpy.Raster(BandPath))", "changed August 29, 2012. 
#-------------------------------------------------------------------------------
# Name:        Landsat Digital Numbers to Radiance/Reflectance
# Purpose:     To convert landsat 4,5, or 7 pixel values from digital numbers
#              to Radiance, Reflectance, or Temperature
# Author:      <NAME> <EMAIL>
#              NASA DEVELOP Program
# Created:     19/10/2012
#-------------------------------------------------------------------------------

import arcpy
import math

arcpy.CheckOutExtension("Spatial")


def DNtoReflectance(Lbands, MetaData, OutputType="Reflectance/Temperature", Save=False, OutputFolder=""):
    """This function is used to convert Landsat 4,5, or 7 pixel values from
    digital numbers to Radiance, Reflectance, or Temperature (if using Band 6)

    -----Inputs------
    Lbands: GeoTIFF files containing individual bands of Landsat imagery. These
    must have the original names as downloaded and must be from a single scene.

    MetaData: The metadata text file that is downloaded with the Landsat Bands
    themselves. This may be either the old or new MTL.txt file.

    OutputType: Choose whether the output should be:
        "Radiance"
        "Reflectance/Temperature" - Calculates Reflectance for spectral bands
                                    and Temperature in Kelvin for Thermal bands

    Save: Boolean value that indicates whether the output rasters will be saved
    permanently. Each band will be saved as an individual GeoTIFF file and be
    named according to the original filename and the output pixel unit.
        *if this is true, then the OutputFolder variable must also be set

    OutputFolder: Folder in which to save the output rasters

    -----Outputs-----
    A list of arcpy raster objects in a sequence that mirrors that of the input Lbands
    """
    OutList = []

    # These lists will be used to parse the metadata text file and locate relevant information.
    # The metadata format was changed August 29, 2012. This tool can process either the new or old format.
    newMeta = ['LANDSAT_SCENE_ID = "', 'DATE_ACQUIRED = ', "SUN_ELEVATION = ",
               "RADIANCE_MAXIMUM_BAND_{0} = ", "RADIANCE_MINIMUM_BAND_{0} = ",
               "QUANTIZE_CAL_MAX_BAND_{0} = ", "QUANTIZE_CAL_MIN_BAND_{0} = "]

    oldMeta = ['BAND1_FILE_NAME = "', "ACQUISITION_DATE = ", "SUN_ELEVATION = ",
               "LMAX_BAND{0} = ", "LMIN_BAND{0} = ",
               "QCALMAX_BAND{0} = ", "QCALMIN_BAND{0} = "]

    f = open(MetaData)
    MText = f.read()

    # The presence of a PRODUCT_CREATION_TIME category is used to identify old metadata;
    # if this is not present, the metadata is considered new.
    # Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer.
    if "PRODUCT_CREATION_TIME" in MText:
        Meta = oldMeta
        Band6length = 2
    else:
        Meta = newMeta
        Band6length = 8

    # The tilename is located using the newMeta/oldMeta indices and the date of capture is recorded.
    if Meta == newMeta:
        TileName = MText.split(Meta[0])[1].split('"')[0]
        year = TileName[9:13]
        jday = TileName[13:16]
    elif Meta == oldMeta:
        TileName = MText.split(Meta[0])[1].split('"')[0]
        year = TileName[13:17]
        jday = TileName[17:20]

    date = MText.split(Meta[1])[1].split('\n')[0]

    # The spacecraft from which the imagery was captured is identified;
    # this info determines the solar exoatmospheric irradiance (ESun) for each band.
    spacecraft = MText.split('SPACECRAFT_ID = "')[1].split('"')[0]

    ThermBands = ["6"]
    if "7" in spacecraft:
        ESun = (1969.0, 1840.0, 1551.0, 1044.0, 255.700, 0., 82.07, 1368.00)
        ThermBands = ["B6_VCID_1", "B6_VCID_2"]
    elif "5" in spacecraft:
        ESun = (1957.0, 1826.0, 1554.0, 1036.0, 215.0, 0., 80.67)
    elif "4" in spacecraft:
        ESun = (1957.0, 1825.0, 1557.0, 1033.0, 214.9, 0., 80.72)
    elif "8" in spacecraft:
        ESun = (1857.0, 1996.0, 1812.0, 1516.0, 983.3, 251.8, 85.24, 0.0, 389.3, 0., 0.)
        ThermBands = ["10", "11"]
    else:
        arcpy.AddError("This tool only works for Landsat 4, 5, 7 or 8 ")
        raise arcpy.ExecuteError()

    # Determining if the year is a leap year and setting the days in year accordingly.
    if float(year) % 4 == 0:
        DIY = 366.
    else:
        DIY = 365.

    # Using the date to determine the distance from the sun.
    theta = 2 * math.pi * float(jday) / DIY
    dSun2 = (1.00011 + 0.034221 * math.cos(theta) + 0.001280 * math.sin(theta) +
             0.000719 * math.cos(2 * theta) + 0.000077 * math.sin(2 * theta))

    SZA = 90. - float(MText.split(Meta[2])[1].split("\n")[0])

    # Calculating values for each band.
    for pathname in Lbands:
        try:
            BandNum = pathname.split("\\")[-1].split("B")[1][0:2]
            try:
                int(BandNum)
            except:
                BandNum = pathname.split("\\")[-1].split("B")[1][0]
        except:
            msg = "Error reading Band {0}. Bands must have original names as downloaded.".format(pathname)
            arcpy.AddError(msg)
            print msg
            raise arcpy.ExecuteError

        # Changing the Band 6 name to match the metadata.
        if BandNum == "6" and spacecraft[8] == "7":
            BandNum = pathname.split("\\")[-1].split("B")[1][0:Band6length]

        print "Processing Band {0}".format(BandNum)
        Oraster = arcpy.Raster(pathname)

        # Using the oldMeta/newMeta indices to pull the min/max for radiance/digital numbers.
        LMax = float(MText.split(Meta[3].format(BandNum))[1].split("\n")[0])
        LMin = float(MText.split(Meta[4].format(BandNum))[1].split("\n")[0])
        QCalMax = float(MText.split(Meta[5].format(BandNum))[1].split("\n")[0])
        QCalMin = float(MText.split(Meta[6].format(BandNum))[1].split("\n")[0])

        Radraster = (((LMax - LMin) / (QCalMax - QCalMin)) * (Oraster - QCalMin)) + LMin
        Oraster = 0

        if OutputType == "Radiance":
            Radraster.save("{0}\\{1}_B{2}_Radiance.tif".format(OutputFolder, TileName, BandNum))
            Radraster = 0

        elif OutputType == "Reflectance/Temperature":
            # Calculating temperature for band 6 if present.
            if BandNum in ThermBands:
                Refraster = 1282.71 / (arcpy.sa.Ln((666.09 / Radraster) + 1.0))
                BandPath = "{0}\\{1}_B{2}_Temperature.tif".format(OutputFolder, TileName, BandNum)
                arcpy.AddMessage("Proceeded through if")
            # Otherwise calculate reflectance.
            else:
                Refraster = (math.pi * Radraster * dSun2) / (ESun[int(BandNum[0]) - 1] * math.cos(SZA * math.pi / 180))
                BandPath = "{0}\\{1}_B{2}_TOA_Reflectance.tif".format(OutputFolder, TileName, BandNum)
                arcpy.AddMessage("Proceeded through else")

            if Save == True:
                Refraster.save(BandPath)
                OutList.append(arcpy.Raster(BandPath))
            else:
                OutList.append(Refraster)
            del Refraster, Radraster

        arcpy.AddMessage("Reflectance Calculated for Band {0}".format(BandNum))
        print "Reflectance Calculated for Band {0}".format(BandNum)
    f.close()
    return OutList
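# --- Illustrative usage sketch (not part of the original tool) ----------------
# A minimal example of how DNtoReflectance might be called, assuming an
# ArcGIS 10.x / Python 2 session with the Spatial Analyst extension available.
# The scene ID, file paths, and output folder below are hypothetical
# placeholders; substitute the paths of an actually downloaded scene.
if __name__ == '__main__':
    scene = r"C:\Landsat\LE70410362003114EDC00"
    bands = [scene + "\\LE70410362003114EDC00_B3.TIF",
             scene + "\\LE70410362003114EDC00_B4.TIF"]
    mtl = scene + "\\LE70410362003114EDC00_MTL.txt"

    rasters = DNtoReflectance(bands, mtl,
                              OutputType="Reflectance/Temperature",
                              Save=True,
                              OutputFolder=r"C:\Landsat\output")
    for ras in rasters:
        print ras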
[ "+ str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except Exception, e: print", "10.10; rv:34.0) Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers) except Exception,e: print e", "def get_people(self): rawres = myparser.parser(self.totalresults, self.word) return rawres.people_twitter() def process(self):", "Firefox/3.7\" self.quantity = \"100\" self.limit = int(limit) self.counter = 0", "e: print e headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS", "\"www.google.com\" self.userAgent = \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2)", "Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers) except", "__init__(self, word, limit): self.word = word.replace(' ', '%20') self.results =", "X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers) except Exception,e: print", "def __init__(self, word, limit): self.word = word.replace(' ', '%20') self.results", "+ \"/search?num=100&start=\" + str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except Exception,", "get_people(self): rawres = myparser.parser(self.totalresults, self.word) return rawres.people_twitter() def process(self): while", "word.replace(' ', '%20') self.results = \"\" self.totalresults = \"\" self.server", "word, limit): self.word = word.replace(' ', '%20') self.results = \"\"", "r=requests.get(urly,headers=headers) except Exception,e: print e self.results = r.content self.totalresults +=", "def process(self): while (self.counter < self.limit): self.do_search() self.counter += 100", "self.results def get_people(self): rawres = myparser.parser(self.totalresults, self.word) return rawres.people_twitter() def", "import requests import sys import myparser import re class search_twitter:", "\"/search?num=100&start=\" + str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except Exception, e:", "NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity = \"100\" self.limit =", "rawres = myparser.parser(self.totalresults, self.word) return rawres.people_twitter() def process(self): while (self.counter", "= \"\" self.totalresults = \"\" self.server = \"www.google.com\" self.hostname =", "return rawres.people_twitter() def process(self): while (self.counter < self.limit): self.do_search() self.counter", "self.totalresults = \"\" self.server = \"www.google.com\" self.hostname = \"www.google.com\" self.userAgent", "self.word) return rawres.people_twitter() def process(self): while (self.counter < self.limit): self.do_search()", "= \"100\" self.limit = int(limit) self.counter = 0 def do_search(self):", "headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0)", "+ self.word except Exception, e: print e headers = {'User-Agent':'Mozilla/5.0", "Exception,e: print e self.results = r.content self.totalresults += self.results def", "(Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity =", "try: r=requests.get(urly,headers=headers) except Exception,e: print e self.results = r.content self.totalresults", "def do_search(self): try: urly=\"https://\"+ self.server + \"/search?num=100&start=\" + str(self.counter) +", "self.totalresults += self.results def get_people(self): rawres = myparser.parser(self.totalresults, self.word) return", "= \"\" self.server = \"www.google.com\" self.hostname = \"www.google.com\" 
self.userAgent =", "try: urly=\"https://\"+ self.server + \"/search?num=100&start=\" + str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" +", "self.userAgent = \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116", "self.results = \"\" self.totalresults = \"\" self.server = \"www.google.com\" self.hostname", "self.limit = int(limit) self.counter = 0 def do_search(self): try: urly=\"https://\"+", "self.server = \"www.google.com\" self.hostname = \"www.google.com\" self.userAgent = \"(Mozilla/5.0 (Windows;", "str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except Exception, e: print e", "= \"www.google.com\" self.hostname = \"www.google.com\" self.userAgent = \"(Mozilla/5.0 (Windows; U;", "string import requests import sys import myparser import re class", "self.hostname = \"www.google.com\" self.userAgent = \"(Mozilla/5.0 (Windows; U; Windows NT", "rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity = \"100\" self.limit = int(limit) self.counter", "class search_twitter: def __init__(self, word, limit): self.word = word.replace(' ',", "', '%20') self.results = \"\" self.totalresults = \"\" self.server =", "= 0 def do_search(self): try: urly=\"https://\"+ self.server + \"/search?num=100&start=\" +", "process(self): while (self.counter < self.limit): self.do_search() self.counter += 100 print", "self.counter = 0 def do_search(self): try: urly=\"https://\"+ self.server + \"/search?num=100&start=\"", "while (self.counter < self.limit): self.do_search() self.counter += 100 print \"\\tSearching", "+ \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except Exception, e: print e headers", "self.server + \"/search?num=100&start=\" + str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except", "Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers) except Exception,e: print e self.results =", "import re class search_twitter: def __init__(self, word, limit): self.word =", "(self.counter < self.limit): self.do_search() self.counter += 100 print \"\\tSearching \"", "print e self.results = r.content self.totalresults += self.results def get_people(self):", "limit): self.word = word.replace(' ', '%20') self.results = \"\" self.totalresults", "= myparser.parser(self.totalresults, self.word) return rawres.people_twitter() def process(self): while (self.counter <", "import myparser import re class search_twitter: def __init__(self, word, limit):", "self.limit): self.do_search() self.counter += 100 print \"\\tSearching \" + str(self.counter)", "= int(limit) self.counter = 0 def do_search(self): try: urly=\"https://\"+ self.server", "self.quantity = \"100\" self.limit = int(limit) self.counter = 0 def", "do_search(self): try: urly=\"https://\"+ self.server + \"/search?num=100&start=\" + str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\"", "\"\" self.server = \"www.google.com\" self.hostname = \"www.google.com\" self.userAgent = \"(Mozilla/5.0", "6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity = \"100\" self.limit = int(limit)", "= word.replace(' ', '%20') self.results = \"\" self.totalresults = \"\"", "Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers)", "print e headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X", "self.results = r.content self.totalresults += self.results def 
get_people(self): rawres =", "self.word except Exception, e: print e headers = {'User-Agent':'Mozilla/5.0 (Macintosh;", "Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity = \"100\" self.limit", "\"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word except Exception, e: print e headers =", "\"100\" self.limit = int(limit) self.counter = 0 def do_search(self): try:", "requests import sys import myparser import re class search_twitter: def", "except Exception,e: print e self.results = r.content self.totalresults += self.results", "urly=\"https://\"+ self.server + \"/search?num=100&start=\" + str(self.counter) + \"&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20\" + self.word", "U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity = \"100\"", "re class search_twitter: def __init__(self, word, limit): self.word = word.replace('", "\"www.google.com\" self.hostname = \"www.google.com\" self.userAgent = \"(Mozilla/5.0 (Windows; U; Windows", "rawres.people_twitter() def process(self): while (self.counter < self.limit): self.do_search() self.counter +=", "Firefox/34.0'} try: r=requests.get(urly,headers=headers) except Exception,e: print e self.results = r.content", "(Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'} try:", "e self.results = r.content self.totalresults += self.results def get_people(self): rawres", "self.word = word.replace(' ', '%20') self.results = \"\" self.totalresults =", "\"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\" self.quantity", "= {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101", "self.counter += 100 print \"\\tSearching \" + str(self.counter) + \"", "Gecko/20100116 Firefox/3.7\" self.quantity = \"100\" self.limit = int(limit) self.counter =", "OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers) except Exception,e:", "rv:34.0) Gecko/20100101 Firefox/34.0'} try: r=requests.get(urly,headers=headers) except Exception,e: print e self.results", "< self.limit): self.do_search() self.counter += 100 print \"\\tSearching \" +", "self.do_search() self.counter += 100 print \"\\tSearching \" + str(self.counter) +", "myparser.parser(self.totalresults, self.word) return rawres.people_twitter() def process(self): while (self.counter < self.limit):", "int(limit) self.counter = 0 def do_search(self): try: urly=\"https://\"+ self.server +", "import string import requests import sys import myparser import re", "Exception, e: print e headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac", "= r.content self.totalresults += self.results def get_people(self): rawres = myparser.parser(self.totalresults,", "{'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}", "sys import myparser import re class search_twitter: def __init__(self, word,", "import sys import myparser import re class search_twitter: def __init__(self,", "r.content self.totalresults += self.results def get_people(self): rawres = myparser.parser(self.totalresults, self.word)", "+= 100 print \"\\tSearching \" + str(self.counter) + \" results..\"", "= \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7\"", "'%20') self.results = \"\" self.totalresults = \"\" self.server = \"www.google.com\"", "\"\" self.totalresults = \"\" self.server = \"www.google.com\" self.hostname = \"www.google.com\"", "e headers = 
{'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10;", "+= self.results def get_people(self): rawres = myparser.parser(self.totalresults, self.word) return rawres.people_twitter()", "myparser import re class search_twitter: def __init__(self, word, limit): self.word", "except Exception, e: print e headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel", "search_twitter: def __init__(self, word, limit): self.word = word.replace(' ', '%20')", "= \"www.google.com\" self.userAgent = \"(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US;", "0 def do_search(self): try: urly=\"https://\"+ self.server + \"/search?num=100&start=\" + str(self.counter)" ]
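# --- Illustrative usage sketch (not part of the original module) --------------
# Assumes the companion myparser module from the same project is importable and
# that get_people() returns an iterable of parsed Twitter handles/names. The
# search term and result limit below are arbitrary examples.
if __name__ == '__main__':
    search = search_twitter("example.com", 200)
    search.process()              # fetches results in pages of 100
    for person in search.get_people():
        print person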
[ "json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\", \"lat=\" + str(nn_lat) + \"&lng=\" +", "+ str(nn_lng))[\"data\"] print(locations) for location in locations: location_id = location[\"id\"]", "json access_token = \"<KEY>\" api_url = \"https://api.instagram.com/v1\" nn_lat = 56.296504", "= \"https://api.instagram.com/v1\" nn_lat = 56.296504 nn_lng = 43.936059 def request(endpoint,", "\"?access_token=\" + access_token + \"&\" + req_params print(req) raw_response =", "request(endpoint, req_params = \"\"): req = api_url + endpoint +", "\"&\" + req_params print(req) raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations", "= \"\"): req = api_url + endpoint + \"?access_token=\" +", "nn_lng = 43.936059 def request(endpoint, req_params = \"\"): req =", "+ \"&lng=\" + str(nn_lng))[\"data\"] print(locations) for location in locations: location_id", "str(nn_lat) + \"&lng=\" + str(nn_lng))[\"data\"] print(locations) for location in locations:", "= 56.296504 nn_lng = 43.936059 def request(endpoint, req_params = \"\"):", "import urllib.request import json access_token = \"<KEY>\" api_url = \"https://api.instagram.com/v1\"", "locations: location_id = location[\"id\"] location_media = request(\"/locations/\" + str(location_id) +", "nn_lat = 56.296504 nn_lng = 43.936059 def request(endpoint, req_params =", "+ endpoint + \"?access_token=\" + access_token + \"&\" + req_params", "location_id = location[\"id\"] location_media = request(\"/locations/\" + str(location_id) + \"/media/recent\")", "+ \"?access_token=\" + access_token + \"&\" + req_params print(req) raw_response", "\"lat=\" + str(nn_lat) + \"&lng=\" + str(nn_lng))[\"data\"] print(locations) for location", "= api_url + endpoint + \"?access_token=\" + access_token + \"&\"", "req = api_url + endpoint + \"?access_token=\" + access_token +", "req_params print(req) raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\",", "\"\"): req = api_url + endpoint + \"?access_token=\" + access_token", "\"&lng=\" + str(nn_lng))[\"data\"] print(locations) for location in locations: location_id =", "print(req) raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\", \"lat=\"", "urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\", \"lat=\" + str(nn_lat) +", "access_token + \"&\" + req_params print(req) raw_response = urllib.request.urlopen(req).read() return", "= urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\", \"lat=\" + str(nn_lat)", "in locations: location_id = location[\"id\"] location_media = request(\"/locations/\" + str(location_id)", "api_url = \"https://api.instagram.com/v1\" nn_lat = 56.296504 nn_lng = 43.936059 def", "req_params = \"\"): req = api_url + endpoint + \"?access_token=\"", "= 43.936059 def request(endpoint, req_params = \"\"): req = api_url", "43.936059 def request(endpoint, req_params = \"\"): req = api_url +", "= location[\"id\"] location_media = request(\"/locations/\" + str(location_id) + \"/media/recent\") print(location_media)", "+ \"&\" + req_params print(req) raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8'))", "+ str(nn_lat) + \"&lng=\" + str(nn_lng))[\"data\"] print(locations) for location in", 
"api_url + endpoint + \"?access_token=\" + access_token + \"&\" +", "+ access_token + \"&\" + req_params print(req) raw_response = urllib.request.urlopen(req).read()", "access_token = \"<KEY>\" api_url = \"https://api.instagram.com/v1\" nn_lat = 56.296504 nn_lng", "import json access_token = \"<KEY>\" api_url = \"https://api.instagram.com/v1\" nn_lat =", "\"<KEY>\" api_url = \"https://api.instagram.com/v1\" nn_lat = 56.296504 nn_lng = 43.936059", "+ req_params print(req) raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations =", "56.296504 nn_lng = 43.936059 def request(endpoint, req_params = \"\"): req", "for location in locations: location_id = location[\"id\"] location_media = request(\"/locations/\"", "print(locations) for location in locations: location_id = location[\"id\"] location_media =", "str(nn_lng))[\"data\"] print(locations) for location in locations: location_id = location[\"id\"] location_media", "return json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\", \"lat=\" + str(nn_lat) + \"&lng=\"", "= \"<KEY>\" api_url = \"https://api.instagram.com/v1\" nn_lat = 56.296504 nn_lng =", "urllib.request import json access_token = \"<KEY>\" api_url = \"https://api.instagram.com/v1\" nn_lat", "location in locations: location_id = location[\"id\"] location_media = request(\"/locations/\" +", "\"https://api.instagram.com/v1\" nn_lat = 56.296504 nn_lng = 43.936059 def request(endpoint, req_params", "endpoint + \"?access_token=\" + access_token + \"&\" + req_params print(req)", "raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations = request(\"/locations/search\", \"lat=\" +", "locations = request(\"/locations/search\", \"lat=\" + str(nn_lat) + \"&lng=\" + str(nn_lng))[\"data\"]", "def request(endpoint, req_params = \"\"): req = api_url + endpoint", "request(\"/locations/search\", \"lat=\" + str(nn_lat) + \"&lng=\" + str(nn_lng))[\"data\"] print(locations) for", "= request(\"/locations/search\", \"lat=\" + str(nn_lat) + \"&lng=\" + str(nn_lng))[\"data\"] print(locations)" ]
[ "x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator,", "emulate wtforms form.\"\"\" pass class DummyFile(object): \"\"\"Dummy file like class", "raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests, req_context): validator = allowed_file() extensions", "valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo',", "upper=None, lower=None, numeric=None, special=None) valid = [\"as123.21\", \"abcdef\", \"sdadadaswasasa\", \"1234567\",", "for app.validators. \"\"\" from wtforms import ValidationError import flask from", "password_rules(length=100, message=\"custom message\") field = DummyField() field.data = \"wrong\" with", "field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError)", "wtforms import ValidationError import flask from pytest import raises from", "invalid) def test_password_rules_message(subtests): validator = password_rules(length=100, message=\"custom message\") field =", "= ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid", "lower=None, numeric=None, special=None) valid = [\"as123.21\", \"abcdef\", \"sdadadaswasasa\", \"1234567\", \"...,.,..,\",", "__repr__(self): return self.filename def _run_validator_check(subtests, validator, valid, invalid): \"\"\"Runs tests", "= [\"1bcd4A.d\", \"123456\", \"a?9#.0\"] invalid = [\"2ds.#<\", \"abcdef\", \"ABCDEF\", \"x2U.'Q\"]", "field.\"\"\" def __init__(self, data=None, errors=(), raw_data=None): self.data = data self.errors", "[\"1bcd4A.d\", \"123456\", \"a?9#.0\"] invalid = [\"2ds.#<\", \"abcdef\", \"ABCDEF\", \"x2U.'Q\"] _run_validator_check(subtests,", "special=None) valid = [\"abcdefg\", \"axzBAR\", \"123abcdsa\", \"AbCdEfGh\", \"..as..2ds..\"] invalid =", "= [\"helloo\", \"ABCDEF\", \"Ab1.?c\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests):", "invalid: field.data = item with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field)", "password_rules(length=6, upper=2, lower=None, numeric=None, special=None) valid = [\"abcDEf\", \"HellOO\", \"ABCDEZ\",", "test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html'] valid", "filename): self.filename = filename def __repr__(self): return self.filename def _run_validator_check(subtests,", "[[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] =", "\"ABCDEF\", \"Ab1.?c\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests): validator =", "flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) ==", "= raw_data def gettext(self, string): return string def ngettext(self, singular,", "field = DummyField() field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with", "valid = [\"ABc1.2\", \"abcDEF123#%^\", \"a2B.C?\"] invalid = [\"helloo\", \"ABCDEF\", \"Ab1.?c\"]", "\"\"\"Dummy file like class to emulate uploaded file handler.\"\"\" def", "flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator =", 
"from wtforms import ValidationError import flask from pytest import raises", "test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2, lower=None, numeric=None, special=None) valid =", "validator, valid, invalid) def test_password_rules_all(subtests): validator = password_rules(length=6, upper=2, lower=1,", "pytest import raises from app.utils.validators import password_rules, image_file, allowed_file class", "DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid)", "self.filename def _run_validator_check(subtests, validator, valid, invalid): \"\"\"Runs tests again validator", "validator, valid, invalid): \"\"\"Runs tests again validator with valid and", "def test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html']", "raises from app.utils.validators import password_rules, image_file, allowed_file class DummyField(object): \"\"\"Dummy", "= list(errors) self.raw_data = raw_data def gettext(self, string): return string", "DummyFile(object): \"\"\"Dummy file like class to emulate uploaded file handler.\"\"\"", "invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=2, special=None)", "against valid: List of valid inputs invalid: List of invalid", "= ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff',", "[DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid,", "field.data = item with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field) def", "flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context): validator = image_file(message=\"custom", "flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def", "e: validator(DummyForm(), field) assert str(e.value) == \"custom message\" def test_password_rules_length(subtests):", "self.errors = list(errors) self.raw_data = raw_data def gettext(self, string): return", "'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [DummyFile(x) for x in", "extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png',", "DummyField() field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with", "valid, invalid) def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None,", "'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x)", "validator = image_file() extensions = ['jpg', 'png', 'tiff'] valid =", "allowed_file(message=\"custom message\") field = DummyField() field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] =", "_run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests, req_context): validator = image_file()", "= [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS']", "[\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid = [\"abcdef\", \".23134\", 
\"AbCd123,]\"] _run_validator_check(subtests, validator,", "n): return singular class DummyForm(dict): \"\"\"Dummy form object to emulate", "plural, n): return singular class DummyForm(dict): \"\"\"Dummy form object to", "valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] =", "invalid): \"\"\"Runs tests again validator with valid and invalid inputs.", "extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests, req_context):", "validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions", "def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=2, special=None) valid", "def test_allowed_file(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html']", "validator, valid, invalid) def test_image_file_message(req_context): validator = image_file(message=\"custom message\") field", "special=None) valid = [\"1bcd4A.d\", \"123456\", \"a?9#.0\"] invalid = [\"2ds.#<\", \"abcdef\",", "with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests, req_context): validator", "def _run_validator_check(subtests, validator, valid, invalid): \"\"\"Runs tests again validator with", "import ValidationError import flask from pytest import raises from app.utils.validators", "return self.filename def _run_validator_check(subtests, validator, valid, invalid): \"\"\"Runs tests again", "x in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator,", "again validator with valid and invalid inputs. 
Args: subtest: Subtests", "\"a1.V3\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6,", "item with subtests.test(item=item): validator(DummyForm(), field) for item in invalid: field.data", "= ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg',", "'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [DummyFile(x) for x", "invalid = [DummyFile(x) for x in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions", "import password_rules, image_file, allowed_file class DummyField(object): \"\"\"Dummy field object to", "= ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [[DummyFile(x) for x", "flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def", "DummyField() field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with", "= [\"abc\", \"123\", \"....\", \"aBcDe\", \"a1.V3\"] _run_validator_check(subtests, validator, valid, invalid)", "upper=None, lower=None, numeric=2, special=None) valid = [\"1bcd4A.d\", \"123456\", \"a?9#.0\"] invalid", "validator: Validator instance to run tests against valid: List of", "class DummyField(object): \"\"\"Dummy field object to emulate wtforms field.\"\"\" def", "[\"foOBAR\", \"123ABcdSA\", \"1a2b.C#\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests): validator", "extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_message(req_context): validator", "'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [DummyFile(x) for", "valid = [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid = [\"abcdef\", \".23134\", \"AbCd123,]\"]", "raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == \"custom message\"", "'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe',", "special=3) valid = [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid = [\"abcdef\", \".23134\",", "= item with subtests.test(item=item): validator(DummyForm(), field) for item in invalid:", "valid, invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2, lower=None, numeric=None,", "req_context): validator = image_file() extensions = ['jpg', 'png', 'tiff'] valid", "Validator instance to run tests against valid: List of valid", "wtforms field.\"\"\" def __init__(self, data=None, errors=(), raw_data=None): self.data = data", "for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests,", "'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [[DummyFile(x) for x", "\"custom message\" def test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None,", "= ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid =", "invalid = [\"foOBAR\", \"123ABcdSA\", \"1a2b.C#\"] _run_validator_check(subtests, validator, valid, invalid) def", "= data self.errors = list(errors) self.raw_data = raw_data def gettext(self,", "def test_image_file_multiple(subtests, req_context): validator = image_file() extensions = ['jpg', 'png',", "def test_password_rules_lower(subtests): validator = 
password_rules(length=6, upper=None, lower=3, numeric=None, special=None) valid", "valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]]", "= image_file() extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg',", "ValidationError import flask from pytest import raises from app.utils.validators import", "numeric=None, special=None) valid = [\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid", "['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [DummyFile(x)", "DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e:", "def gettext(self, string): return string def ngettext(self, singular, plural, n):", "app.utils.validators import password_rules, image_file, allowed_file class DummyField(object): \"\"\"Dummy field object", "extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']", "instance to run tests against valid: List of valid inputs", "subtest: Subtests fixture. validator: Validator instance to run tests against", "test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None) valid =", "invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid)", "form object to emulate wtforms form.\"\"\" pass class DummyFile(object): \"\"\"Dummy", "DummyField() for item in valid: field.data = item with subtests.test(item=item):", "_run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests): validator = password_rules(length=100, message=\"custom", "valid, invalid) def test_image_file_multiple(subtests, req_context): validator = image_file() extensions =", "_run_validator_check(subtests, validator, valid, invalid): \"\"\"Runs tests again validator with valid", "message\") field = DummyField() field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo']", "\"123ABcdSA\", \"1a2b.C#\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests): validator =", "'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x) for x in valid]", "'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [[DummyFile(x) for", "assert str(e.value) == \"custom message\" def test_image_file(subtests, req_context): validator =", "for item in invalid: field.data = item with subtests.test(item=item): with", "inputs invalid: List of invalid inputs \"\"\" field = DummyField()", "[[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid =", "validator, valid, invalid) def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None,", "= [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid = [\"abcdef\", \".23134\", \"AbCd123,]\"] _run_validator_check(subtests,", "uploaded file handler.\"\"\" def __init__(self, filename): self.filename = filename def", "with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_message(req_context): validator =", "invalid) def test_image_file_multiple(subtests, req_context): validator = image_file() extensions = ['jpg',", "field) assert str(e.value) == \"custom message\" def test_image_file(subtests, req_context): validator", "with valid 
and invalid inputs. Args: subtest: Subtests fixture. validator:", "= [\"2ds.#<\", \"abcdef\", \"ABCDEF\", \"x2U.'Q\"] _run_validator_check(subtests, validator, valid, invalid) def", "def test_image_file_message(req_context): validator = image_file(message=\"custom message\") field = DummyField() field.data", "[DummyFile(x) for x in valid] invalid = [DummyFile(x) for x", "[DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x) for x in invalid],", "valid = [\"abcdefg\", \"axzBAR\", \"123abcdsa\", \"AbCdEfGh\", \"..as..2ds..\"] invalid = [\"foOBAR\",", "valid, invalid): \"\"\"Runs tests again validator with valid and invalid", "invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6, upper=None, lower=3, numeric=None, special=None)", "file like class to emulate uploaded file handler.\"\"\" def __init__(self,", "\"AAAAAAA\", \"AbCdEf\"] invalid = [\"abc\", \"123\", \"....\", \"aBcDe\", \"a1.V3\"] _run_validator_check(subtests,", "\"abcdef123><?\"] invalid = [\"abcdef\", \".23134\", \"AbCd123,]\"] _run_validator_check(subtests, validator, valid, invalid)", "field = DummyField() field.data = \"wrong\" with raises(ValidationError) as e:", "class to emulate uploaded file handler.\"\"\" def __init__(self, filename): self.filename", "invalid = [DummyFile(x) for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions", "password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid = [\"ABc1.2\", \"abcDEF123#%^\", \"a2B.C?\"]", "[DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator,", "validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None) valid = [\"as123.21\",", "with raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests, req_context): validator = allowed_file()", "flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_message(req_context): validator = allowed_file(message=\"custom", "invalid: List of invalid inputs \"\"\" field = DummyField() for", "field = DummyField() field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with", "= [\"ABc1.2\", \"abcDEF123#%^\", \"a2B.C?\"] invalid = [\"helloo\", \"ABCDEF\", \"Ab1.?c\"] _run_validator_check(subtests,", "['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid =", "self.filename = filename def __repr__(self): return self.filename def _run_validator_check(subtests, validator,", "message\" def test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None)", "\"123\", \"....\", \"aBcDe\", \"a1.V3\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests):", "def __repr__(self): return self.filename def _run_validator_check(subtests, validator, valid, invalid): \"\"\"Runs", "allowed_file() extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip',", "invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]]", "of invalid inputs \"\"\" field = DummyField() for item in", "'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x) for x in valid] invalid", "\"1234567\", \"...,.,..,\", \"AAAAAAA\", \"AbCdEf\"] invalid = [\"abc\", \"123\", \"....\", 
\"aBcDe\",", "password_rules(length=6, upper=None, lower=None, numeric=None, special=None) valid = [\"as123.21\", \"abcdef\", \"sdadadaswasasa\",", "validator = password_rules(length=100, message=\"custom message\") field = DummyField() field.data =", "test_password_rules_all(subtests): validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid =", "in valid: field.data = item with subtests.test(item=item): validator(DummyForm(), field) for", "x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x) for", "\"a?9#.0\"] invalid = [\"2ds.#<\", \"abcdef\", \"ABCDEF\", \"x2U.'Q\"] _run_validator_check(subtests, validator, valid,", "for item in valid: field.data = item with subtests.test(item=item): validator(DummyForm(),", "invalid) def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=3)", "'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']", "= DummyField() for item in valid: field.data = item with", "_run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_message(req_context): validator = allowed_file(message=\"custom message\")", "with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == \"custom", "def __init__(self, filename): self.filename = filename def __repr__(self): return self.filename", "List of valid inputs invalid: List of invalid inputs \"\"\"", "invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests,", "= [\"abcdef\", \".23134\", \"AbCd123,]\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests):", "invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x) for", "emulate uploaded file handler.\"\"\" def __init__(self, filename): self.filename = filename", "= filename def __repr__(self): return self.filename def _run_validator_check(subtests, validator, valid,", "[\"helloo\", \"ABCDEF\", \"Ab1.?c\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests): validator", "def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=3) valid", "\".23134\", \"AbCd123,]\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests): validator =", "numeric=None, special=None) valid = [\"as123.21\", \"abcdef\", \"sdadadaswasasa\", \"1234567\", \"...,.,..,\", \"AAAAAAA\",", "x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with", "test_password_rules_message(subtests): validator = password_rules(length=100, message=\"custom message\") field = DummyField() field.data", "\".#@dS9\", \"abcdef123><?\"] invalid = [\"abcdef\", \".23134\", \"AbCd123,]\"] _run_validator_check(subtests, validator, valid,", "\"abcdef\", \"sdadadaswasasa\", \"1234567\", \"...,.,..,\", \"AAAAAAA\", \"AbCdEf\"] invalid = [\"abc\", \"123\",", "class DummyFile(object): \"\"\"Dummy file like class to emulate uploaded file", "Args: subtest: Subtests fixture. 
validator: Validator instance to run tests", "__init__(self, filename): self.filename = filename def __repr__(self): return self.filename def", "req_context): validator = allowed_file() extensions = ['exe', 'html'] valid =", "['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf',", "with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context): validator =", "x in valid] invalid = [DummyFile(x) for x in invalid]", "as e: validator(DummyForm(), field) assert str(e.value) == \"custom message\" def", "special=None) valid = [\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid =", "validator, valid, invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6, upper=None, lower=3,", "for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions", "\"x2U.'Q\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_special(subtests): validator = password_rules(length=6,", "lower=None, numeric=None, special=None) valid = [\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"]", "valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x) for x in", "_run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests): validator = password_rules(length=6, upper=2,", "raw_data=None): self.data = data self.errors = list(errors) self.raw_data = raw_data", "def test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None) valid", "'.jpg', 'o.gif'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],", "== \"custom message\" def test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None,", "invalid = [\"abcdEf\", \"helloo\", \"A231sdsd\"] _run_validator_check(subtests, validator, valid, invalid) def", "password_rules, image_file, allowed_file class DummyField(object): \"\"\"Dummy field object to emulate", "in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid,", "\"A231sdsd\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6,", "validator = password_rules(length=6, upper=None, lower=3, numeric=None, special=None) valid = [\"abcdefg\",", "valid, invalid) def test_allowed_file_message(req_context): validator = allowed_file(message=\"custom message\") field =", "= [\"foOBAR\", \"123ABcdSA\", \"1a2b.C#\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests):", "[\"as123.21\", \"abcdef\", \"sdadadaswasasa\", \"1234567\", \"...,.,..,\", \"AAAAAAA\", \"AbCdEf\"] invalid = [\"abc\",", "in valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS']", "= DummyField() field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context():", "with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests, req_context): validator", "'o.gif'] valid = [DummyFile(x) for x in valid] invalid =", "assert str(e.value) == \"custom message\" def 
test_password_rules_length(subtests): validator = password_rules(length=6,", "data=None, errors=(), raw_data=None): self.data = data self.errors = list(errors) self.raw_data", "ngettext(self, singular, plural, n): return singular class DummyForm(dict): \"\"\"Dummy form", "def test_password_rules_message(subtests): validator = password_rules(length=100, message=\"custom message\") field = DummyField()", "return singular class DummyForm(dict): \"\"\"Dummy form object to emulate wtforms", "valid: field.data = item with subtests.test(item=item): validator(DummyForm(), field) for item", "validator = allowed_file(message=\"custom message\") field = DummyField() field.data = DummyFile(\"blah.foo\")", "upper=None, lower=None, numeric=None, special=3) valid = [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid", "test_image_file_message(req_context): validator = image_file(message=\"custom message\") field = DummyField() field.data =", "invalid) def test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions = ['exe',", "invalid) def test_allowed_file_message(req_context): validator = allowed_file(message=\"custom message\") field = DummyField()", "with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator", "'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid", "validator(DummyForm(), field) assert str(e.value) == \"custom message\" def test_image_file(subtests, req_context):", "\"\"\"Dummy field object to emulate wtforms field.\"\"\" def __init__(self, data=None,", "for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions", "e: validator(DummyForm(), field) assert str(e.value) == \"custom message\" def test_image_file(subtests,", "flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(),", "validator = password_rules(length=6, upper=None, lower=None, numeric=2, special=None) valid = [\"1bcd4A.d\",", "extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context): validator", "\"\"\"Unit tests for app.validators. 
\"\"\" from wtforms import ValidationError import", "gettext(self, string): return string def ngettext(self, singular, plural, n): return", "item in invalid: field.data = item with subtests.test(item=item): with raises(ValidationError):", "= DummyField() field.data = \"wrong\" with raises(ValidationError) as e: validator(DummyForm(),", "message\" def test_image_file(subtests, req_context): validator = image_file() extensions = ['jpg',", "valid, invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=2,", "upper=2, lower=1, numeric=1, special=1) valid = [\"ABc1.2\", \"abcDEF123#%^\", \"a2B.C?\"] invalid", "lower=None, numeric=None, special=3) valid = [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid =", "lower=1, numeric=1, special=1) valid = [\"ABc1.2\", \"abcDEF123#%^\", \"a2B.C?\"] invalid =", "'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid =", "handler.\"\"\" def __init__(self, filename): self.filename = filename def __repr__(self): return", "import flask from pytest import raises from app.utils.validators import password_rules,", "flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests, req_context): validator =", "file handler.\"\"\" def __init__(self, filename): self.filename = filename def __repr__(self):", "validator, valid, invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None, lower=None,", "in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x) for x", "[\"abcdefg\", \"axzBAR\", \"123abcdsa\", \"AbCdEfGh\", \"..as..2ds..\"] invalid = [\"foOBAR\", \"123ABcdSA\", \"1a2b.C#\"]", "'.jpg', 'o.gif'] valid = [DummyFile(x) for x in valid] invalid", "= [DummyFile(x) for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with", "invalid) def test_password_rules_all(subtests): validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1)", "<reponame>kajusK/HiddenPlaces \"\"\"Unit tests for app.validators. 
\"\"\" from wtforms import ValidationError", "['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [[DummyFile(x) for x in", "field) for item in invalid: field.data = item with subtests.test(item=item):", "= ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe',", "validator = password_rules(length=6, upper=2, lower=None, numeric=None, special=None) valid = [\"abcDEf\",", "= item with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests,", "\"helloo\", \"A231sdsd\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_lower(subtests): validator =", "test_image_file(subtests, req_context): validator = image_file() extensions = ['jpg', 'png', 'tiff']", "_run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None,", "for x in valid] invalid = [DummyFile(x) for x in", "def test_image_file(subtests, req_context): validator = image_file() extensions = ['jpg', 'png',", "invalid = [\"abc\", \"123\", \"....\", \"aBcDe\", \"a1.V3\"] _run_validator_check(subtests, validator, valid,", "= ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x) for x", "= [\"as123.21\", \"abcdef\", \"sdadadaswasasa\", \"1234567\", \"...,.,..,\", \"AAAAAAA\", \"AbCdEf\"] invalid =", "password_rules(length=6, upper=None, lower=None, numeric=None, special=3) valid = [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"]", "data self.errors = list(errors) self.raw_data = raw_data def gettext(self, string):", "'foo.html'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]),", "item in valid: field.data = item with subtests.test(item=item): validator(DummyForm(), field)", "to emulate uploaded file handler.\"\"\" def __init__(self, filename): self.filename =", "invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid)", "'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid =", "to run tests against valid: List of valid inputs invalid:", "validator(DummyForm(), field) for item in invalid: field.data = item with", "[\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid = [\"abcdEf\", \"helloo\", \"A231sdsd\"]", "def test_password_rules_all(subtests): validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid", "emulate wtforms field.\"\"\" def __init__(self, data=None, errors=(), raw_data=None): self.data =", "[[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] =", "field.data = \"wrong\" with raises(ValidationError) as e: validator(DummyForm(), field) assert", "return string def ngettext(self, singular, plural, n): return singular class", "self.raw_data = raw_data def gettext(self, string): return string def ngettext(self,", "'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [[DummyFile(x) for x in valid],", "= password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid = [\"ABc1.2\", \"abcDEF123#%^\",", "from app.utils.validators import password_rules, image_file, allowed_file class DummyField(object): \"\"\"Dummy field", "with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value)", "pass class DummyFile(object): 
\"\"\"Dummy file like class to emulate uploaded", "\"custom message\" def test_image_file(subtests, req_context): validator = image_file() extensions =", "with subtests.test(item=item): validator(DummyForm(), field) for item in invalid: field.data =", "singular, plural, n): return singular class DummyForm(dict): \"\"\"Dummy form object", "validator(DummyForm(), field) def test_allowed_file(subtests, req_context): validator = allowed_file() extensions =", "['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [[DummyFile(x)", "str(e.value) == \"custom message\" def test_image_file(subtests, req_context): validator = image_file()", "validator = image_file(message=\"custom message\") field = DummyField() field.data = DummyFile(\"blah\")", "_run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context): validator = image_file(message=\"custom message\")", "validator, valid, invalid) def test_allowed_file_message(req_context): validator = allowed_file(message=\"custom message\") field", "\"....\", \"aBcDe\", \"a1.V3\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests): validator", "import raises from app.utils.validators import password_rules, image_file, allowed_file class DummyField(object):", "def test_allowed_file_message(req_context): validator = allowed_file(message=\"custom message\") field = DummyField() field.data", "= \"wrong\" with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value)", "'o.gif'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]),", "= password_rules(length=100, message=\"custom message\") field = DummyField() field.data = \"wrong\"", "\"ADSDSA\"] invalid = [\"abcdEf\", \"helloo\", \"A231sdsd\"] _run_validator_check(subtests, validator, valid, invalid)", "\"AbCdEfGh\", \"..as..2ds..\"] invalid = [\"foOBAR\", \"123ABcdSA\", \"1a2b.C#\"] _run_validator_check(subtests, validator, valid,", "\"aBcDe\", \"a1.V3\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests): validator =", "x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with", "__init__(self, data=None, errors=(), raw_data=None): self.data = data self.errors = list(errors)", "for x in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests,", "'', '.jpg', 'o.gif'] valid = [DummyFile(x) for x in valid]", "wtforms form.\"\"\" pass class DummyFile(object): \"\"\"Dummy file like class to", "_run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2,", "valid = [\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid = [\"abcdEf\",", "\"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid = [\"abcdEf\", \"helloo\", \"A231sdsd\"] _run_validator_check(subtests,", "numeric=2, special=None) valid = [\"1bcd4A.d\", \"123456\", \"a?9#.0\"] invalid = [\"2ds.#<\",", "valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions =", "image_file, allowed_file class DummyField(object): \"\"\"Dummy field object to emulate wtforms", "tests against valid: List of valid inputs invalid: List of", "valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] =", "= 
allowed_file() extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe',", "= image_file(message=\"custom message\") field = DummyField() field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS']", "\"wrong\" with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) ==", "valid, invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6, upper=None, lower=3, numeric=None,", "= [DummyFile(x) for x in valid] invalid = [DummyFile(x) for", "= DummyField() field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context():", "test_allowed_file(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html'] valid", "flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(),", "'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [[DummyFile(x) for x in", "lower=3, numeric=None, special=None) valid = [\"abcdefg\", \"axzBAR\", \"123abcdsa\", \"AbCdEfGh\", \"..as..2ds..\"]", "DummyField(object): \"\"\"Dummy field object to emulate wtforms field.\"\"\" def __init__(self,", "\"Ab1.?c\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests): validator = password_rules(length=100,", "['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert", "= allowed_file(message=\"custom message\") field = DummyField() field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS']", "list(errors) self.raw_data = raw_data def gettext(self, string): return string def", "tests for app.validators. \"\"\" from wtforms import ValidationError import flask", "in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context():", "upper=2, lower=None, numeric=None, special=None) valid = [\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\",", "= extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context):", "from pytest import raises from app.utils.validators import password_rules, image_file, allowed_file", "[DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator,", "\"ABCDEF\", \"x2U.'Q\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_special(subtests): validator =", "field) assert str(e.value) == \"custom message\" def test_password_rules_length(subtests): validator =", "in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid,", "_run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator = allowed_file()", "inputs \"\"\" field = DummyField() for item in valid: field.data", "subtests.test(item=item): validator(DummyForm(), field) for item in invalid: field.data = item", "= DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as", "self.data = data self.errors = 
list(errors) self.raw_data = raw_data def", "to emulate wtforms field.\"\"\" def __init__(self, data=None, errors=(), raw_data=None): self.data", "'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid", "= password_rules(length=6, upper=None, lower=3, numeric=None, special=None) valid = [\"abcdefg\", \"axzBAR\",", "special=1) valid = [\"ABc1.2\", \"abcDEF123#%^\", \"a2B.C?\"] invalid = [\"helloo\", \"ABCDEF\",", "\"1a2b.C#\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6,", "field object to emulate wtforms field.\"\"\" def __init__(self, data=None, errors=(),", "\"123abcdsa\", \"AbCdEfGh\", \"..as..2ds..\"] invalid = [\"foOBAR\", \"123ABcdSA\", \"1a2b.C#\"] _run_validator_check(subtests, validator,", "app.validators. \"\"\" from wtforms import ValidationError import flask from pytest", "image_file() extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG',", "invalid = [\"helloo\", \"ABCDEF\", \"Ab1.?c\"] _run_validator_check(subtests, validator, valid, invalid) def", "in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context():", "test_allowed_file_message(req_context): validator = allowed_file(message=\"custom message\") field = DummyField() field.data =", "[\"abc\", \"123\", \"....\", \"aBcDe\", \"a1.V3\"] _run_validator_check(subtests, validator, valid, invalid) def", "field.data = DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError)", "message\") field = DummyField() field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo']", "= ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field)", "item with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests, req_context):", "valid = [\"as123.21\", \"abcdef\", \"sdadadaswasasa\", \"1234567\", \"...,.,..,\", \"AAAAAAA\", \"AbCdEf\"] invalid", "validator, valid, invalid) def test_image_file_multiple(subtests, req_context): validator = image_file() extensions", "\"\"\"Dummy form object to emulate wtforms form.\"\"\" pass class DummyFile(object):", "object to emulate wtforms form.\"\"\" pass class DummyFile(object): \"\"\"Dummy file", "invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2, lower=None, numeric=None, special=None)", "\"\"\" field = DummyField() for item in valid: field.data =", "\"\"\"Runs tests again validator with valid and invalid inputs. Args:", "\"AbCd123,]\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests): validator = password_rules(length=6,", "\"A.b#3CZ\", \"ADSDSA\"] invalid = [\"abcdEf\", \"helloo\", \"A231sdsd\"] _run_validator_check(subtests, validator, valid,", "message=\"custom message\") field = DummyField() field.data = \"wrong\" with raises(ValidationError)", "valid and invalid inputs. Args: subtest: Subtests fixture. 
validator: Validator", "\"a2B.C?\"] invalid = [\"helloo\", \"ABCDEF\", \"Ab1.?c\"] _run_validator_check(subtests, validator, valid, invalid)", "raw_data def gettext(self, string): return string def ngettext(self, singular, plural,", "like class to emulate uploaded file handler.\"\"\" def __init__(self, filename):", "test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=3) valid =", "object to emulate wtforms field.\"\"\" def __init__(self, data=None, errors=(), raw_data=None):", "Subtests fixture. validator: Validator instance to run tests against valid:", "= password_rules(length=6, upper=2, lower=None, numeric=None, special=None) valid = [\"abcDEf\", \"HellOO\",", "image_file(message=\"custom message\") field = DummyField() field.data = DummyFile(\"blah\") flask.current_app.config['IMAGE_EXTENSIONS'] =", "'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '',", "valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE',", "\"abcdef\", \"ABCDEF\", \"x2U.'Q\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_special(subtests): validator", "validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid = [\"ABc1.2\",", "filename def __repr__(self): return self.filename def _run_validator_check(subtests, validator, valid, invalid):", "_run_validator_check(subtests, validator, valid, invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6, upper=None,", "password_rules(length=6, upper=None, lower=3, numeric=None, special=None) valid = [\"abcdefg\", \"axzBAR\", \"123abcdsa\",", "invalid inputs \"\"\" field = DummyField() for item in valid:", "invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests,", "= extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests,", "for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x)", "valid inputs invalid: List of invalid inputs \"\"\" field =", "= [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid", "valid, invalid) def test_image_file_message(req_context): validator = image_file(message=\"custom message\") field =", "extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context):", "= [\"abcDEf\", \"HellOO\", \"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid = [\"abcdEf\", \"helloo\",", "numeric=None, special=3) valid = [\"ab.?123!\", \".#@dS9\", \"abcdef123><?\"] invalid = [\"abcdef\",", "'foo.html'] valid = [DummyFile(x) for x in valid] invalid =", "= DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as", "errors=(), raw_data=None): self.data = data self.errors = list(errors) self.raw_data =", "invalid) def test_image_file_message(req_context): validator = image_file(message=\"custom message\") field = DummyField()", "_run_validator_check(subtests, validator, valid, invalid) def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None,", "validator, 
valid, invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2, lower=None,", "field) def test_allowed_file(subtests, req_context): validator = allowed_file() extensions = ['exe',", "DummyFile(\"blah.foo\") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e:", "[DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid,", "['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']", "DummyFile(valid[1])]] invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]),", "numeric=None, special=None) valid = [\"abcdefg\", \"axzBAR\", \"123abcdsa\", \"AbCdEfGh\", \"..as..2ds..\"] invalid", "tests again validator with valid and invalid inputs. Args: subtest:", "def ngettext(self, singular, plural, n): return singular class DummyForm(dict): \"\"\"Dummy", "= [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS']", "\"ABCDEZ\", \"A.b#3CZ\", \"ADSDSA\"] invalid = [\"abcdEf\", \"helloo\", \"A231sdsd\"] _run_validator_check(subtests, validator,", "'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [[DummyFile(x)", "[\"abcdEf\", \"helloo\", \"A231sdsd\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_lower(subtests): validator", "validator with valid and invalid inputs. Args: subtest: Subtests fixture.", "[\"abcdef\", \".23134\", \"AbCd123,]\"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests): validator", "valid = [DummyFile(x) for x in valid] invalid = [DummyFile(x)", "inputs. Args: subtest: Subtests fixture. 
        validator: Validator instance to run tests against
        valid: List of valid inputs
        invalid: List of invalid inputs
    """
    field = DummyField()
    for item in valid:
        field.data = item
        with subtests.test(item=item):
            validator(DummyForm(), field)
    for item in invalid:
        field.data = item
        with subtests.test(item=item):
            with raises(ValidationError):
                validator(DummyForm(), field)


def test_allowed_file(subtests, req_context):
    validator = allowed_file()
    extensions = ['exe', 'html']
    valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']
    invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html']
    valid = [DummyFile(x) for x in valid]
    invalid = [DummyFile(x) for x in invalid]
    flask.current_app.config['DISABLED_EXTENSIONS'] = extensions
    with flask.current_app.test_request_context():
        _run_validator_check(subtests, validator, valid, invalid)


def test_allowed_file_multiple(subtests, req_context):
    validator = allowed_file()
    extensions = ['exe', 'html']
    valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo']
    invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html']
    valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],
             [DummyFile(valid[0]), DummyFile(valid[1])]]
    invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])],
               [DummyFile(invalid[0]), DummyFile(invalid[1])]]
    flask.current_app.config['DISABLED_EXTENSIONS'] = extensions
    with flask.current_app.test_request_context():
        _run_validator_check(subtests, validator, valid, invalid)


def test_allowed_file_message(req_context):
    # Body not recoverable from the damaged source; the test ends by asserting
    # str(e.value) == "custom message".
    ...


def test_password_rules_length(subtests):
    validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None)
    valid = ["as123.21", "abcdef", "sdadadaswasasa", "1234567", "...,.,..,", "AAAAAAA", "AbCdEf"]
    invalid = ["abc", "123", "....", "aBcDe", "a1.V3"]
    _run_validator_check(subtests, validator, valid, invalid)


def test_password_rules_upper(subtests):
    validator = password_rules(length=6, upper=2, lower=None, numeric=None, special=None)
    valid = [...]  # the valid examples could not be recovered from the damaged source
    invalid = ["abcdEf", "helloo", "A231sdsd"]
    _run_validator_check(subtests, validator, valid, invalid)


def test_password_rules_lower(subtests):
    validator = password_rules(length=6, upper=None, lower=3, numeric=None, special=None)
    valid = ["abcdefg", "axzBAR", "123abcdsa", "AbCdEfGh", "..as..2ds.."]
    invalid = ["foOBAR", "123ABcdSA", "1a2b.C#"]
    _run_validator_check(subtests, validator, valid, invalid)


def test_password_rules_numeric(subtests):
    validator = password_rules(length=6, upper=None, lower=None, numeric=2, special=None)
    valid = ["1bcd4A.d", "123456", "a?9#.0"]
    invalid = ["2ds.#<", "abcdef", "ABCDEF", "x2U.'Q"]
    _run_validator_check(subtests, validator, valid, invalid)


def test_password_rules_special(subtests):
    validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=3)
    valid = ["ab.?123!", ".#@dS9"]  # any further valid examples were lost in the damaged source
    invalid = ["abcdef", ".23134", "AbCd123,]"]
    _run_validator_check(subtests, validator, valid, invalid)


def test_password_rules_all(subtests):
    validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1)
    valid = ["ABc1.2", "abcDEF123#%^", "a2B.C?"]
    invalid = ["helloo", "ABCDEF", "Ab1.?c"]
    _run_validator_check(subtests, validator, valid, invalid)


def test_password_rules_message(subtests):
    validator = password_rules(length=100, message="custom message")
    field = DummyField()
    field.data = "wrong"
    with raises(ValidationError) as e:
        validator(DummyForm(), field)
    assert str(e.value) == "custom message"


def test_image_file(subtests, req_context):
    validator = image_file()
    extensions = ['jpg', 'png', 'tiff']
    valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']
    invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']
    valid = [DummyFile(x) for x in valid]
    invalid = [DummyFile(x) for x in invalid]
    flask.current_app.config['IMAGE_EXTENSIONS'] = extensions
    with flask.current_app.test_request_context():
        _run_validator_check(subtests, validator, valid, invalid)


def test_image_file_multiple(subtests, req_context):
    validator = image_file()
    extensions = ['jpg', 'png', 'tiff']
    valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg']
    invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif']
    valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])],
             [DummyFile(valid[0]), DummyFile(valid[1])]]
    invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])],
               [DummyFile(invalid[0]), DummyFile(invalid[1])]]
    flask.current_app.config['IMAGE_EXTENSIONS'] = extensions
    with flask.current_app.test_request_context():
        _run_validator_check(subtests, validator, valid, invalid)
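
# A minimal sketch of how the validators exercised above are typically wired
# into a WTForms form. The form and field names here are illustrative
# assumptions, not part of the original project; only the password_rules
# keyword arguments mirror the tests.
from wtforms import Form, PasswordField


class ExampleRegistrationForm(Form):
    password = PasswordField(
        'Password',
        validators=[password_rules(length=8, upper=1, lower=1, numeric=1, special=1)])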
[ "<filename>ts_eval/utils/nans.py<gh_stars>1-10 import warnings import numpy as np def nans_in_same_positions(*arrays): \"\"\"", "they have NaNs in the same positions. \"\"\" if len(arrays)", "def nanmeanw(arr, axis=None): \"\"\" Computes nanmean without raising a warning", "np def nans_in_same_positions(*arrays): \"\"\" Compares all provided arrays to see", "see if they have NaNs in the same positions. \"\"\"", "== np.isnan(arr)).all(): return False return True def nanmeanw(arr, axis=None): \"\"\"", "axis=None): \"\"\" Computes nanmean without raising a warning in case", "in the dataset \"\"\" with warnings.catch_warnings(): warnings.simplefilter(\"ignore\", category=RuntimeWarning) return np.nanmean(arr,", "in arrays[1:]: if not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return False return", "NaNs in the same positions. \"\"\" if len(arrays) == 0:", "Compares all provided arrays to see if they have NaNs", "import warnings import numpy as np def nans_in_same_positions(*arrays): \"\"\" Compares", "in the same positions. \"\"\" if len(arrays) == 0: return", "import numpy as np def nans_in_same_positions(*arrays): \"\"\" Compares all provided", "of NaNs in the dataset \"\"\" with warnings.catch_warnings(): warnings.simplefilter(\"ignore\", category=RuntimeWarning)", "Computes nanmean without raising a warning in case of NaNs", "return True for arr in arrays[1:]: if not (np.isnan(arrays[0]) ==", "len(arrays) == 0: return True for arr in arrays[1:]: if", "a warning in case of NaNs in the dataset \"\"\"", "if len(arrays) == 0: return True for arr in arrays[1:]:", "\"\"\" Compares all provided arrays to see if they have", "return True def nanmeanw(arr, axis=None): \"\"\" Computes nanmean without raising", "nanmeanw(arr, axis=None): \"\"\" Computes nanmean without raising a warning in", "in case of NaNs in the dataset \"\"\" with warnings.catch_warnings():", "arrays[1:]: if not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return False return True", "== 0: return True for arr in arrays[1:]: if not", "True for arr in arrays[1:]: if not (np.isnan(arrays[0]) == np.isnan(arr)).all():", "for arr in arrays[1:]: if not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return", "raising a warning in case of NaNs in the dataset", "\"\"\" if len(arrays) == 0: return True for arr in", "arr in arrays[1:]: if not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return False", "return False return True def nanmeanw(arr, axis=None): \"\"\" Computes nanmean", "if not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return False return True def", "\"\"\" Computes nanmean without raising a warning in case of", "without raising a warning in case of NaNs in the", "numpy as np def nans_in_same_positions(*arrays): \"\"\" Compares all provided arrays", "warnings import numpy as np def nans_in_same_positions(*arrays): \"\"\" Compares all", "arrays to see if they have NaNs in the same", "have NaNs in the same positions. \"\"\" if len(arrays) ==", "nans_in_same_positions(*arrays): \"\"\" Compares all provided arrays to see if they", "False return True def nanmeanw(arr, axis=None): \"\"\" Computes nanmean without", "warning in case of NaNs in the dataset \"\"\" with", "if they have NaNs in the same positions. \"\"\" if", "the same positions. 
\"\"\" if len(arrays) == 0: return True", "(np.isnan(arrays[0]) == np.isnan(arr)).all(): return False return True def nanmeanw(arr, axis=None):", "np.isnan(arr)).all(): return False return True def nanmeanw(arr, axis=None): \"\"\" Computes", "True def nanmeanw(arr, axis=None): \"\"\" Computes nanmean without raising a", "0: return True for arr in arrays[1:]: if not (np.isnan(arrays[0])", "NaNs in the dataset \"\"\" with warnings.catch_warnings(): warnings.simplefilter(\"ignore\", category=RuntimeWarning) return", "not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return False return True def nanmeanw(arr,", "positions. \"\"\" if len(arrays) == 0: return True for arr", "case of NaNs in the dataset \"\"\" with warnings.catch_warnings(): warnings.simplefilter(\"ignore\",", "as np def nans_in_same_positions(*arrays): \"\"\" Compares all provided arrays to", "nanmean without raising a warning in case of NaNs in", "to see if they have NaNs in the same positions.", "the dataset \"\"\" with warnings.catch_warnings(): warnings.simplefilter(\"ignore\", category=RuntimeWarning) return np.nanmean(arr, axis=axis)", "same positions. \"\"\" if len(arrays) == 0: return True for", "def nans_in_same_positions(*arrays): \"\"\" Compares all provided arrays to see if", "provided arrays to see if they have NaNs in the", "all provided arrays to see if they have NaNs in" ]
[ "dlkit.primordium.type.primitives import Type from dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers", "def test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_results_record(self): \"\"\"Tests", "@pytest.mark.skip('unimplemented test') def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test') def", "errors from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import Type from", "DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE',", "@pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def", "def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests", "def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests", "test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\"", "test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests for", "\"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass", "request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): # From", "authorization searches.\"\"\" import pytest from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging,", "= proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE", "test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\"", "proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description", "proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE =", "PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION", "'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 
'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template", "catalog' create_form.description = 'Test catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def", "def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\")", "of authorization searches.\"\"\" import pytest from ..utilities.general import is_never_authz, is_no_authz,", "@pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\",", "dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import", "test') def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self):", "= request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented", "\"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass", "pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\")", "Type from dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime", "from dlkit.primordium.type.primitives import Type from dlkit.runtime import PROXY_SESSION, proxy_example from", "@pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ',", "@pytest.mark.skip('unimplemented test') def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def", "request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search =", "@pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class", "def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING',", "@pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 
'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): #", "TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\"", "authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager(", "request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\"", "CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier':", "search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented", "for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented", "\"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass", "test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_results_record(self): \"\"\"Tests get_vault_search_results_record\"\"\"", "def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): \"\"\"Tests", "'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config =", "request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search =", "is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives", "test') def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object):", "class TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): \"\"\"Tests", "pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\")", "def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template", "authorization_search_test_fixture(request): # From 
test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class", "PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\",", "'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config", "get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented", "is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives import", "pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE',", "class TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_vaults(self): \"\"\"Tests", "'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config =", "request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): # From", "= Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ',", "@pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def", "class TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self): \"\"\"Tests", "get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented", "test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\"", "'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param", "vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager(", "TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\"", "test') def test_get_vault_query_inspector(self): \"\"\"Tests 
get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_results_record(self):", "create_form.display_name = 'Test catalog' create_form.description = 'Test catalog description' request.cls.catalog", "\"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_vaults(self):", "VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test')", "from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import Type from dlkit.runtime", "= request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): #", "'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config", "test') def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self):", "'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param", "PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority':", "\"\"\"Unit tests of authorization searches.\"\"\" import pytest from ..utilities.general import", "for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented", "def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): \"\"\"Tests", "test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\"", "implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description =", "import Type from dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers import", "from dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives", "pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test')", "dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime REQUEST =", "@pytest.mark.skip('unimplemented test') def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def", "test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM',", "class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) 
request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search", "= PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})", "import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY", "class TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): \"\"\"Tests", "AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test')", "= Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name =", "pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test')", "catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\")", "create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description = 'Test", "= request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description = 'Test catalog", "dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST)", "uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives import Id", "request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search()", "@pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def", "import PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest()", "request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\"", "TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\"", "\"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\"", "def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr 
=", "'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request):", "get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented", "\"\"\"Tests get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_results_record(self): \"\"\"Tests get_vault_search_results_record\"\"\" pass", "'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr", "test') def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object):", "import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import errors from", "\"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass", "@pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def", "test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests for", "\"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass", "order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): \"\"\"Tests get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\",", "from dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime REQUEST", "request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request):", "\"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass", "test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config)", "'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template", "# From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() 
@pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class TestVaultSearch(object):", "= request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented", "'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr", "Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test", "@pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\",", "searches.\"\"\" import pytest from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only", "create_form.description = 'Test catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down():", "request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search()", "REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION)", "PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace':", "@pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): #", "'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])", "def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\")", "@pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def", "..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import errors", "test') def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self):", "def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self): \"\"\"Tests", "From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", 
\"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests", "@pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class", "def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): \"\"\"Tests", "pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test')", "uses_filesystem_only from dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives import Id from", "get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def", "def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template", "\"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\" pass", "test') def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ',", "AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test')", "@pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def", "\"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass", "'Test catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down)", "tests of authorization searches.\"\"\" import pytest from ..utilities.general import is_never_authz,", "request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form", "get_authorization_search_record\"\"\" pass @pytest.mark.usefixtures(\"authorization_search_results_class_fixture\", \"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented", "params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From", "Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ',", "\"\"\"Tests 
get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass", "CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT',", "\"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self):", "order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\",", "pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test')", "TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\"", "pytest from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid", "<gh_stars>1-10 \"\"\"Unit tests of authorization searches.\"\"\" import pytest from ..utilities.general", "= PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT',", "= request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form =", "\"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self):", "test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\"", "= request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def vault_search_test_fixture(request): #", "proxy_example from dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION =", "test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests for", "request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name", "# From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION',", "test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\"", "@pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test') def", 
"search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented", "= 'Test catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident)", "pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): \"\"\"Tests get_vault_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test')", "def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr =", "import errors from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import Type", "request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([])", "\"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): \"\"\"Tests get_vault_search_record\"\"\" pass", "def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): \"\"\"Tests", "'Test catalog' create_form.description = 'Test catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form)", "get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): \"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\",", "From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY,", "@pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def", "\"\"\"Tests get_authorization_search_results_record\"\"\" pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])", "'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING',", "test') def test_get_vaults(self): \"\"\"Tests get_vaults\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self):", "request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request):", "test') def test_order_vault_results(self): \"\"\"Tests order_vault_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self):", "VaultSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): \"\"\"Tests search_among_vaults\"\"\" pass @pytest.mark.skip('unimplemented test')", "Id from dlkit.primordium.type.primitives import Type from dlkit.runtime import PROXY_SESSION, proxy_example", "'AUTHORIZATION', proxy=PROXY, 
implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog'", "description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def", "Runtime REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY =", "request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description = 'Test catalog description'", "= 'Test catalog' create_form.description = 'Test catalog description' request.cls.catalog =", "test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests for", "for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented", "test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\"", "params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From", "vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class", "'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def", "\"\"\"Tests get_vault_search_record\"\"\" pass @pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\"", "\"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass", "@pytest.mark.usefixtures(\"vault_search_results_class_fixture\", \"vault_search_results_test_fixture\") class TestVaultSearchResults(object): \"\"\"Tests for VaultSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def", "class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope=\"function\") def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search", "for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): \"\"\"Tests search_among_authorizations\"\"\" pass @pytest.mark.skip('unimplemented", "from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import", "From test_templates/resource.py::ResourceSearch::init_template request.cls.search = 
request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests", "dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import Type from dlkit.runtime import", "pass @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request):", "test') def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self):", "def test_get_authorizations(self): \"\"\"Tests get_authorizations\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): \"\"\"Tests", "\"authorization_search_results_test_fixture\") class TestAuthorizationSearchResults(object): \"\"\"Tests for AuthorizationSearchResults\"\"\" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self):", "import pytest from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from", "import Id from dlkit.primordium.type.primitives import Type from dlkit.runtime import PROXY_SESSION,", "# From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object):", "def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): \"\"\"Tests", "'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope=\"class\", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM',", "test') def test_order_authorization_results(self): \"\"\"Tests order_authorization_results\"\"\" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self):", "request.cls.catalog.get_vault_search() @pytest.mark.usefixtures(\"vault_search_class_fixture\", \"vault_search_test_fixture\") class TestVaultSearch(object): \"\"\"Tests for VaultSearch\"\"\" @pytest.mark.skip('unimplemented test')", "request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures(\"authorization_search_class_fixture\", \"authorization_search_test_fixture\") class TestAuthorizationSearch(object): \"\"\"Tests for AuthorizationSearch\"\"\" @pytest.mark.skip('unimplemented test')", "from dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition()", "pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): \"\"\"Tests get_authorization_query_inspector\"\"\" pass @pytest.mark.skip('unimplemented test')" ]
[ "autofile def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read the geometry from the", "vrc_locs=(0,)): \"\"\" Read the geometry from the filesys \"\"\" vrc_fs", "if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str = None return", "NEW: Handle flux files \"\"\" import autofile def read_flux(ts_save_path, vrc_locs=(0,)):", "from the filesys \"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str", "autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str = None", "vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str = None return flux_str", "flux files \"\"\" import autofile def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read", "<filename>mechroutines/models/_flux.py \"\"\" NEW: Handle flux files \"\"\" import autofile def", "def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read the geometry from the filesys", "\"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else:", "vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str", "\"\"\" NEW: Handle flux files \"\"\" import autofile def read_flux(ts_save_path,", "Handle flux files \"\"\" import autofile def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\"", "\"\"\" import autofile def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read the geometry", "read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read the geometry from the filesys \"\"\"", "\"\"\" Read the geometry from the filesys \"\"\" vrc_fs =", "Read the geometry from the filesys \"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path)", "import autofile def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read the geometry from", "the filesys \"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str =", "filesys \"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs)", "= autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str =", "files \"\"\" import autofile def read_flux(ts_save_path, vrc_locs=(0,)): \"\"\" Read the", "the geometry from the filesys \"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path) if", "geometry from the filesys \"\"\" vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs):" ]
[ "supported: - gini - entropy For regression tree following criterion", "the prediction is averge of all estimator predictions. Args: n_estimators", "== 'mae': self.is_classification_forest = False else: raise Exception(\"Invalid criterion: {}\".format(self.criterion))", "preds y_preds = [] for preds in all_preds: if self.is_classification_forest:", "i, _ in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub =", "range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]]) return", "which estimators needs to be constructed. Default: np.inf min_samples_split Minimum", "np.expand_dims(y, axis=1) Xy = np.concatenate((X, y), axis=1) num_samples = X.shape[0]", "total features. - If regressor, default is total number of", "error) - mae (mean absolute error) Default: gini random_seed random", "len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) Xy = np.concatenate((X,", "self.is_classification_forest = False if self.criterion == 'gini' or self.criterion ==", "of base estimators (Decision Trees here) max_features Maximum features to", "RandomForest(BaseEstimator): \"\"\" Simple implementation of Random Forest. This class has", "X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds", "# Bagging - choose random features for each estimator #", "enumerate(self.trees): preds = tree.predict(X) all_preds[:, i] = preds y_preds =", "tree the prediction is averge of all estimator predictions. Args:", "'entropy': self.is_classification_forest = True elif self.criterion == 'mse' or self.criterion", "fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X = X.to_numpy()", "self.criterion == 'entropy': self.is_classification_forest = True elif self.criterion == 'mse'", "is total number of features. max_depth The maximum depth to", "BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree", "Trees here) max_features Maximum features to be used to construct", "DecisionTree class RandomForest(BaseEstimator): \"\"\" Simple implementation of Random Forest. This", "the node. Default: 2 criterion criterion to be used for", "be constructed. Default: np.inf min_samples_split Minimum number of samples need", "2 criterion criterion to be used for split. For classification", "for split at the node. Default: 2 criterion criterion to", "= preds y_preds = [] for preds in all_preds: if", "in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub,", "is provided, else use square root of # total number", "replace=True ) subsets.append([X[idx], y[idx]]) return subsets def fit(self, X, y):", "self.idxs = [] self.trees = [] for i in range(self.n_estimators):", "base estimators (Decision Trees here) max_features Maximum features to be", "as np import pandas as pd from sklearn.base import BaseEstimator", "is square root of total features. - If regressor, default", "max_depth The maximum depth to which estimators needs to be", "following criterion are supported: - mse (mean squared error) -", "(mean absolute error) Default: gini random_seed random seed value for", "\"\"\" def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators", "random seed value for numpy operations. Default: 0 \"\"\" def", "Forest. 
This class has implementation for Random Forest classifier and", "prediction is averge of all estimator predictions. Args: n_estimators Number", "= False if self.criterion == 'gini' or self.criterion == 'entropy':", "y_sub) def predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators)) for i,", "self.max_features = int(X.shape[1]) # Bagging - choose random features for", "entropy For regression tree following criterion are supported: - mse", "absolute error) Default: gini random_seed random seed value for numpy", "X = X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators) if self.max_features", ") subsets.append([X[idx], y[idx]]) return subsets def fit(self, X, y): np.random.seed(self.random_seed)", "def predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators)) for i, tree", "majority vote. For regression tree the prediction is averge of", "classification tree following criterion are supported: - gini - entropy", "i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion,", "[] for preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax()) else: y_preds.append(np.average(preds))", "self.criterion == 'mae': self.is_classification_forest = False else: raise Exception(\"Invalid criterion:", "[] if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) Xy", "number of samples need to present for split at the", "- gini - entropy For regression tree following criterion are", "for preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax()) else: y_preds.append(np.average(preds)) return", "- entropy For regression tree following criterion are supported: -", "max_features = self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest = False", "'mse' or self.criterion == 'mae': self.is_classification_forest = False else: raise", "criterion criterion to be used for split. For classification tree", "= np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]]) return subsets def", "0 \"\"\" def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0):", "n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features", "idx = rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True )", "self.trees[i].max_features = self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def", "os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class RandomForest(BaseEstimator): \"\"\" Simple", "pd from sklearn.base import BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree'))", "- If regressor, default is total number of features. max_depth", "min_samples_split Minimum number of samples need to present for split", "with replacement. For classification the prediction is by majority vote.", "== 'mse' or self.criterion == 'mae': self.is_classification_forest = False else:", "self.max_features = max_features self.max_depth = max_depth self.min_samples_split = min_samples_split self.criterion", "gini random_seed random seed value for numpy operations. 
Default: 0", "Bagging - choose random features for each estimator # if", "def fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X =", "def get_subsets(self, X, y, num=1): subsets = [] if len(np.shape(y))", "n_estimators Number of base estimators (Decision Trees here) max_features Maximum", "self.min_samples_split = min_samples_split self.criterion = criterion self.random_seed = random_seed self.idxs", "as pd from sklearn.base import BaseEstimator import sys import os", "predictions. Args: n_estimators Number of base estimators (Decision Trees here)", "- mae (mean absolute error) Default: gini random_seed random seed", "self.is_classification_forest = True elif self.criterion == 'mse' or self.criterion ==", "'mae': self.is_classification_forest = False else: raise Exception(\"Invalid criterion: {}\".format(self.criterion)) def", "np.empty((X.shape[0], self.n_estimators)) for i, tree in enumerate(self.trees): preds = tree.predict(X)", "The maximum depth to which estimators needs to be constructed.", "following criterion are supported: - gini - entropy For regression", "total number of features. for i, _ in enumerate(self.trees): self.trees[i].max_features", "X): all_preds = np.empty((X.shape[0], self.n_estimators)) for i, tree in enumerate(self.trees):", "by majority vote. For regression tree the prediction is averge", "choose random features for each estimator # if max_features is", "_ in range(num): idx = rng.choice( range(num_samples), size = np.shape(range(int(num_samples)),", "random_seed self.idxs = [] self.trees = [] for i in", "pd.DataFrame): X = X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators) if", "y[idx]]) return subsets def fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X,", "random choice with replacement. For classification the prediction is by", "random_seed = self.random_seed)) self.is_classification_forest = False if self.criterion == 'gini'", "[] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features =", "of samples need to present for split at the node.", "import DecisionTree class RandomForest(BaseEstimator): \"\"\" Simple implementation of Random Forest.", "and regressor. Dataset bagging is done by simple numpy random", "criterion are supported: - gini - entropy For regression tree", "from DecisionTree import DecisionTree class RandomForest(BaseEstimator): \"\"\" Simple implementation of", "[] self.trees = [] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth,", "max_features self.max_depth = max_depth self.min_samples_split = min_samples_split self.criterion = criterion", "size = np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]]) return subsets", "self.is_classification_forest = False else: raise Exception(\"Invalid criterion: {}\".format(self.criterion)) def get_subsets(self,", "gini - entropy For regression tree following criterion are supported:", "in range(num): idx = rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ),", "each estimator # if max_features is provided, else use square", "bagging is done by simple numpy random choice with replacement.", "at the node. Default: 2 criterion criterion to be used", "For regression tree following criterion are supported: - mse (mean", "import pandas as pd from sklearn.base import BaseEstimator import sys", "root of # total number of features. 
for i, _", "numpy as np import pandas as pd from sklearn.base import", "For regression tree the prediction is averge of all estimator", "node. Default: 2 criterion criterion to be used for split.", "for _ in range(num): idx = rng.choice( range(num_samples), size =", "- mse (mean squared error) - mae (mean absolute error)", "self.n_estimators) if self.max_features == 0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1]))", "min_samples_split self.criterion = criterion self.random_seed = random_seed self.idxs = []", "self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators)) for", "= self.get_subsets(X, y, self.n_estimators) if self.max_features == 0: if self.is_classification_forest:", "here) max_features Maximum features to be used to construct tree.", "classifier, default is square root of total features. - If", "from sklearn.base import BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from", "\"\"\" Simple implementation of Random Forest. This class has implementation", "square root of # total number of features. for i,", "of features. max_depth The maximum depth to which estimators needs", "if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) #", "= max_features self.max_depth = max_depth self.min_samples_split = min_samples_split self.criterion =", "self.n_estimators)) for i, tree in enumerate(self.trees): preds = tree.predict(X) all_preds[:,", "y, self.n_estimators) if self.max_features == 0: if self.is_classification_forest: self.max_features =", "is done by simple numpy random choice with replacement. For", "self.criterion == 'gini' or self.criterion == 'entropy': self.is_classification_forest = True", "samples need to present for split at the node. Default:", "class RandomForest(BaseEstimator): \"\"\" Simple implementation of Random Forest. This class", "np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X = X.to_numpy() subsets = self.get_subsets(X,", "rng = np.random.default_rng(seed= self.random_seed) for _ in range(num): idx =", "all estimator predictions. Args: n_estimators Number of base estimators (Decision", "= np.random.default_rng(seed= self.random_seed) for _ in range(num): idx = rng.choice(", "choice with replacement. For classification the prediction is by majority", "self.get_subsets(X, y, self.n_estimators) if self.max_features == 0: if self.is_classification_forest: self.max_features", "# total number of features. for i, _ in enumerate(self.trees):", "self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) # Bagging", "= [] self.trees = [] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth=", "import BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import", "split at the node. 
Default: 2 criterion criterion to be", "elif self.criterion == 'mse' or self.criterion == 'mae': self.is_classification_forest =", "if self.max_features == 0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else:", "self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) # Bagging -", "= np.expand_dims(y, axis=1) Xy = np.concatenate((X, y), axis=1) num_samples =", "'gini' or self.criterion == 'entropy': self.is_classification_forest = True elif self.criterion", "num_samples = X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for _", "int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) # Bagging - choose random", "Default: gini random_seed random seed value for numpy operations. Default:", "import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class", "self.max_depth = max_depth self.min_samples_split = min_samples_split self.criterion = criterion self.random_seed", "= random_seed self.idxs = [] self.trees = [] for i", "square root of total features. - If regressor, default is", "the prediction is by majority vote. For regression tree the", "tree following criterion are supported: - mse (mean squared error)", "supported: - mse (mean squared error) - mae (mean absolute", "if self.criterion == 'gini' or self.criterion == 'entropy': self.is_classification_forest =", "Random Forest classifier and regressor. Dataset bagging is done by", "subsets.append([X[idx], y[idx]]) return subsets def fit(self, X, y): np.random.seed(self.random_seed) if", "estimator # if max_features is provided, else use square root", "default is total number of features. max_depth The maximum depth", "classification the prediction is by majority vote. For regression tree", "classifier and regressor. Dataset bagging is done by simple numpy", "for numpy operations. Default: 0 \"\"\" def __init__(self, n_estimators, max_features=0,", "= int(X.shape[1]) # Bagging - choose random features for each", "y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X = X.to_numpy() subsets =", "sklearn.base import BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree", "= int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) # Bagging - choose", "axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for", "to be used to construct tree. Default: - If classifier,", "True elif self.criterion == 'mse' or self.criterion == 'mae': self.is_classification_forest", "all_preds[:, i] = preds y_preds = [] for preds in", "is by majority vote. For regression tree the prediction is", "Dataset bagging is done by simple numpy random choice with", "operations. Default: 0 \"\"\" def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2,", "Random Forest. 
This class has implementation for Random Forest classifier", "math import numpy as np import pandas as pd from", "error) Default: gini random_seed random seed value for numpy operations.", "import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class RandomForest(BaseEstimator): \"\"\"", "False if self.criterion == 'gini' or self.criterion == 'entropy': self.is_classification_forest", "random features for each estimator # if max_features is provided,", "for i, tree in enumerate(self.trees): preds = tree.predict(X) all_preds[:, i]", "to construct tree. Default: - If classifier, default is square", "if max_features is provided, else use square root of #", "max_features is provided, else use square root of # total", "enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub)", "sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class RandomForest(BaseEstimator): \"\"\" Simple implementation", "self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X):", "prediction is by majority vote. For regression tree the prediction", "y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed)", "number of features. for i, _ in enumerate(self.trees): self.trees[i].max_features =", "random_seed random seed value for numpy operations. Default: 0 \"\"\"", "= [] if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1)", "squared error) - mae (mean absolute error) Default: gini random_seed", "import math import numpy as np import pandas as pd", "total number of features. max_depth The maximum depth to which", "numpy operations. Default: 0 \"\"\" def __init__(self, n_estimators, max_features=0, max_depth=np.inf,", "max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features = max_features", "random_seed=0): self.n_estimators = n_estimators self.max_features = max_features self.max_depth = max_depth", "else use square root of # total number of features.", "seed value for numpy operations. Default: 0 \"\"\" def __init__(self,", "min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest =", "= False else: raise Exception(\"Invalid criterion: {}\".format(self.criterion)) def get_subsets(self, X,", "= np.empty((X.shape[0], self.n_estimators)) for i, tree in enumerate(self.trees): preds =", "Args: n_estimators Number of base estimators (Decision Trees here) max_features", "numpy random choice with replacement. For classification the prediction is", "estimators (Decision Trees here) max_features Maximum features to be used", "mae (mean absolute error) Default: gini random_seed random seed value", "self.criterion == 'mse' or self.criterion == 'mae': self.is_classification_forest = False", "features to be used to construct tree. Default: - If", "= tree.predict(X) all_preds[:, i] = preds y_preds = [] for", "min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features = max_features self.max_depth", "default is square root of total features. 
- If regressor,", "DecisionTree import DecisionTree class RandomForest(BaseEstimator): \"\"\" Simple implementation of Random", "- If classifier, default is square root of total features.", "np import pandas as pd from sklearn.base import BaseEstimator import", "constructed. Default: np.inf min_samples_split Minimum number of samples need to", "self.criterion = criterion self.random_seed = random_seed self.idxs = [] self.trees", "X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators) if self.max_features == 0:", "of total features. - If regressor, default is total number", "to present for split at the node. Default: 2 criterion", "Default: 2 criterion criterion to be used for split. For", "(mean squared error) - mae (mean absolute error) Default: gini", "pandas as pd from sklearn.base import BaseEstimator import sys import", "averge of all estimator predictions. Args: n_estimators Number of base", "If classifier, default is square root of total features. -", "isinstance(X, pd.DataFrame): X = X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators)", "For classification the prediction is by majority vote. For regression", "in enumerate(self.trees): preds = tree.predict(X) all_preds[:, i] = preds y_preds", "Exception(\"Invalid criterion: {}\".format(self.criterion)) def get_subsets(self, X, y, num=1): subsets =", "if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) Xy =", "rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]])", "raise Exception(\"Invalid criterion: {}\".format(self.criterion)) def get_subsets(self, X, y, num=1): subsets", "provided, else use square root of # total number of", "of # total number of features. for i, _ in", "value for numpy operations. Default: 0 \"\"\" def __init__(self, n_estimators,", "= n_estimators self.max_features = max_features self.max_depth = max_depth self.min_samples_split =", "y_preds = [] for preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax())", "== 1: y = np.expand_dims(y, axis=1) Xy = np.concatenate((X, y),", "= self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest = False if", "n_estimators self.max_features = max_features self.max_depth = max_depth self.min_samples_split = min_samples_split", "estimators needs to be constructed. Default: np.inf min_samples_split Minimum number", "axis=1) Xy = np.concatenate((X, y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy)", "estimator predictions. Args: n_estimators Number of base estimators (Decision Trees", "criterion: {}\".format(self.criterion)) def get_subsets(self, X, y, num=1): subsets = []", "0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1])", "for Random Forest classifier and regressor. Dataset bagging is done", "def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators =", "to be constructed. Default: np.inf min_samples_split Minimum number of samples", "criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features = max_features self.max_depth =", "or self.criterion == 'entropy': self.is_classification_forest = True elif self.criterion ==", "used for split. 
For classification tree following criterion are supported:", "= X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for _ in", "maximum depth to which estimators needs to be constructed. Default:", "regressor, default is total number of features. max_depth The maximum", "regressor. Dataset bagging is done by simple numpy random choice", "of features. for i, _ in enumerate(self.trees): self.trees[i].max_features = self.max_features", "y, num=1): subsets = [] if len(np.shape(y)) == 1: y", "preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax()) else: y_preds.append(np.average(preds)) return y_preds", "y = np.expand_dims(y, axis=1) Xy = np.concatenate((X, y), axis=1) num_samples", "This class has implementation for Random Forest classifier and regressor.", "__init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators", "Simple implementation of Random Forest. This class has implementation for", "= [] for preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax()) else:", "np.inf min_samples_split Minimum number of samples need to present for", "Default: 0 \"\"\" def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini',", "to be used for split. For classification tree following criterion", "= subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds = np.empty((X.shape[0],", "), replace=True ) subsets.append([X[idx], y[idx]]) return subsets def fit(self, X,", "Default: - If classifier, default is square root of total", "of Random Forest. This class has implementation for Random Forest", "tree.predict(X) all_preds[:, i] = preds y_preds = [] for preds", "criterion are supported: - mse (mean squared error) - mae", "number of features. max_depth The maximum depth to which estimators", "= criterion self.random_seed = random_seed self.idxs = [] self.trees =", "max_depth self.min_samples_split = min_samples_split self.criterion = criterion self.random_seed = random_seed", "present for split at the node. Default: 2 criterion criterion", "X, y, num=1): subsets = [] if len(np.shape(y)) == 1:", "subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators))", "range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed =", "regression tree following criterion are supported: - mse (mean squared", "implementation for Random Forest classifier and regressor. Dataset bagging is", "split. For classification tree following criterion are supported: - gini", "self.max_features == 0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features", "class has implementation for Random Forest classifier and regressor. Dataset", "features for each estimator # if max_features is provided, else", "construct tree. Default: - If classifier, default is square root", "root of total features. - If regressor, default is total", "implementation of Random Forest. This class has implementation for Random", "- choose random features for each estimator # if max_features", "vote. For regression tree the prediction is averge of all", "has implementation for Random Forest classifier and regressor. 
Dataset bagging", "np.random.default_rng(seed= self.random_seed) for _ in range(num): idx = rng.choice( range(num_samples),", "y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds =", "= True elif self.criterion == 'mse' or self.criterion == 'mae':", "subsets def fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X", "need to present for split at the node. Default: 2", "Minimum number of samples need to present for split at", "{}\".format(self.criterion)) def get_subsets(self, X, y, num=1): subsets = [] if", "= [] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features", "self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest", "subsets = self.get_subsets(X, y, self.n_estimators) if self.max_features == 0: if", "replacement. For classification the prediction is by majority vote. For", "= X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators) if self.max_features ==", "sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class RandomForest(BaseEstimator):", "False else: raise Exception(\"Invalid criterion: {}\".format(self.criterion)) def get_subsets(self, X, y,", "regression tree the prediction is averge of all estimator predictions.", "1: y = np.expand_dims(y, axis=1) Xy = np.concatenate((X, y), axis=1)", "for i, _ in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub", "features. for i, _ in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub,", "is averge of all estimator predictions. Args: n_estimators Number of", "else: raise Exception(\"Invalid criterion: {}\".format(self.criterion)) def get_subsets(self, X, y, num=1):", "done by simple numpy random choice with replacement. For classification", "int(X.shape[1]) # Bagging - choose random features for each estimator", "use square root of # total number of features. for", "used to construct tree. Default: - If classifier, default is", "Forest classifier and regressor. Dataset bagging is done by simple", "X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X = X.to_numpy() subsets", "Xy = np.concatenate((X, y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng", "np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for _ in range(num): idx", "depth to which estimators needs to be constructed. Default: np.inf", "= max_depth self.min_samples_split = min_samples_split self.criterion = criterion self.random_seed =", "criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest = False if self.criterion ==", "self.random_seed)) self.is_classification_forest = False if self.criterion == 'gini' or self.criterion", "Maximum features to be used to construct tree. Default: -", "mse (mean squared error) - mae (mean absolute error) Default:", "predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators)) for i, tree in", "features. 
- If regressor, default is total number of features.", "import numpy as np import pandas as pd from sklearn.base", "= rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx],", "self.trees = [] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split,", "are supported: - gini - entropy For regression tree following", "max_features Maximum features to be used to construct tree. Default:", "subsets = [] if len(np.shape(y)) == 1: y = np.expand_dims(y,", "(Decision Trees here) max_features Maximum features to be used to", "by simple numpy random choice with replacement. For classification the", "in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed", "== 'gini' or self.criterion == 'entropy': self.is_classification_forest = True elif", "simple numpy random choice with replacement. For classification the prediction", "= self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self,", "for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features,", "for each estimator # if max_features is provided, else use", "Default: np.inf min_samples_split Minimum number of samples need to present", "self.n_estimators = n_estimators self.max_features = max_features self.max_depth = max_depth self.min_samples_split", "np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]]) return subsets def fit(self,", "all_preds = np.empty((X.shape[0], self.n_estimators)) for i, tree in enumerate(self.trees): preds", "tree. Default: - If classifier, default is square root of", "i, tree in enumerate(self.trees): preds = tree.predict(X) all_preds[:, i] =", "self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest = False if self.criterion", "get_subsets(self, X, y, num=1): subsets = [] if len(np.shape(y)) ==", "to which estimators needs to be constructed. Default: np.inf min_samples_split", "of all estimator predictions. Args: n_estimators Number of base estimators", "or self.criterion == 'mae': self.is_classification_forest = False else: raise Exception(\"Invalid", "i] = preds y_preds = [] for preds in all_preds:", "If regressor, default is total number of features. 
max_depth The", "np.concatenate((X, y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed=", "= self.random_seed)) self.is_classification_forest = False if self.criterion == 'gini' or", "self.random_seed = random_seed self.idxs = [] self.trees = [] for", "criterion self.random_seed = random_seed self.idxs = [] self.trees = []", "For classification tree following criterion are supported: - gini -", "== 0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features =", "Number of base estimators (Decision Trees here) max_features Maximum features", "max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features =", "num=1): subsets = [] if len(np.shape(y)) == 1: y =", "return subsets def fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame):", "preds = tree.predict(X) all_preds[:, i] = preds y_preds = []", "_ in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub = subsets[i]", "self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed = self.random_seed))", "X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for _ in range(num):", "be used for split. For classification tree following criterion are", "features. max_depth The maximum depth to which estimators needs to", "else: self.max_features = int(X.shape[1]) # Bagging - choose random features", "# if max_features is provided, else use square root of", "tree in enumerate(self.trees): preds = tree.predict(X) all_preds[:, i] = preds", "for split. For classification tree following criterion are supported: -", "tree following criterion are supported: - gini - entropy For", "if isinstance(X, pd.DataFrame): X = X.to_numpy() subsets = self.get_subsets(X, y,", "range(num): idx = rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True", "criterion to be used for split. For classification tree following", "== 'entropy': self.is_classification_forest = True elif self.criterion == 'mse' or", "are supported: - mse (mean squared error) - mae (mean", "= min_samples_split self.criterion = criterion self.random_seed = random_seed self.idxs =", "= np.concatenate((X, y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng =", "be used to construct tree. Default: - If classifier, default", "needs to be constructed. Default: np.inf min_samples_split Minimum number of", "self.random_seed) for _ in range(num): idx = rng.choice( range(num_samples), size" ]
[ "print(next(g)) print(next(g)) g.pend_throw(ValueError()) v = None try: v = next(g)", "1: yield i i += 1 g = gen() try:", "v) # It's legal to pend exception in a just-started", "just the same # as it's legal to .throw() into", "print(\"SKIP\") raise SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError()) v = None try:", "while 1: yield i i += 1 g = gen()", "try: g.pend_throw except AttributeError: print(\"SKIP\") raise SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError())", "as e: print(\"raised\", repr(e)) print(\"ret was:\", v) # It's legal", "g = gen() g.pend_throw(ValueError()) try: next(g) except ValueError: print(\"ValueError from", "= 0 while 1: yield i i += 1 g", "g = gen() try: g.pend_throw except AttributeError: print(\"SKIP\") raise SystemExit", "0 while 1: yield i i += 1 g =", "def gen(): i = 0 while 1: yield i i", "exception in a just-started generator, just the same # as", "print(next(g)) g.pend_throw(ValueError()) v = None try: v = next(g) except", "the same # as it's legal to .throw() into it.", "raise SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError()) v = None try: v", "v = None try: v = next(g) except Exception as", "# as it's legal to .throw() into it. g =", "Exception as e: print(\"raised\", repr(e)) print(\"ret was:\", v) # It's", "print(\"ret was:\", v) # It's legal to pend exception in", "to .throw() into it. g = gen() g.pend_throw(ValueError()) try: next(g)", "v = next(g) except Exception as e: print(\"raised\", repr(e)) print(\"ret", "next(g) except Exception as e: print(\"raised\", repr(e)) print(\"ret was:\", v)", "gen(): i = 0 while 1: yield i i +=", "as it's legal to .throw() into it. g = gen()", "i i += 1 g = gen() try: g.pend_throw except", "i += 1 g = gen() try: g.pend_throw except AttributeError:", "= next(g) except Exception as e: print(\"raised\", repr(e)) print(\"ret was:\",", "pend exception in a just-started generator, just the same #", "just-started generator, just the same # as it's legal to", "None try: v = next(g) except Exception as e: print(\"raised\",", "it. g = gen() g.pend_throw(ValueError()) try: next(g) except ValueError: print(\"ValueError", "legal to .throw() into it. g = gen() g.pend_throw(ValueError()) try:", "g.pend_throw(ValueError()) v = None try: v = next(g) except Exception", "It's legal to pend exception in a just-started generator, just", ".throw() into it. g = gen() g.pend_throw(ValueError()) try: next(g) except", "it's legal to .throw() into it. g = gen() g.pend_throw(ValueError())", "i = 0 while 1: yield i i += 1", "yield i i += 1 g = gen() try: g.pend_throw", "= None try: v = next(g) except Exception as e:", "AttributeError: print(\"SKIP\") raise SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError()) v = None", "print(\"raised\", repr(e)) print(\"ret was:\", v) # It's legal to pend", "a just-started generator, just the same # as it's legal", "into it. 
g = gen() g.pend_throw(ValueError()) try: next(g) except ValueError:", "g.pend_throw except AttributeError: print(\"SKIP\") raise SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError()) v", "try: v = next(g) except Exception as e: print(\"raised\", repr(e))", "gen() g.pend_throw(ValueError()) try: next(g) except ValueError: print(\"ValueError from just-started gen\")", "was:\", v) # It's legal to pend exception in a", "except Exception as e: print(\"raised\", repr(e)) print(\"ret was:\", v) #", "# It's legal to pend exception in a just-started generator,", "generator, just the same # as it's legal to .throw()", "legal to pend exception in a just-started generator, just the", "except AttributeError: print(\"SKIP\") raise SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError()) v =", "repr(e)) print(\"ret was:\", v) # It's legal to pend exception", "in a just-started generator, just the same # as it's", "= gen() try: g.pend_throw except AttributeError: print(\"SKIP\") raise SystemExit print(next(g))", "e: print(\"raised\", repr(e)) print(\"ret was:\", v) # It's legal to", "1 g = gen() try: g.pend_throw except AttributeError: print(\"SKIP\") raise", "to pend exception in a just-started generator, just the same", "= gen() g.pend_throw(ValueError()) try: next(g) except ValueError: print(\"ValueError from just-started", "+= 1 g = gen() try: g.pend_throw except AttributeError: print(\"SKIP\")", "SystemExit print(next(g)) print(next(g)) g.pend_throw(ValueError()) v = None try: v =", "gen() try: g.pend_throw except AttributeError: print(\"SKIP\") raise SystemExit print(next(g)) print(next(g))", "same # as it's legal to .throw() into it. g" ]
[ "x in range(self.Extent[0]) for y in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace", "Actions, State): \"\"\" Execute `Actions` on `State`. Parameters ---------- Actions", "return Duplicate def __deepcopy__(self, memo): Default = None Exists =", "= deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime,", "self.Health) Duplicate.Position = self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack = self.Attack", "for the missile launcher unit class MissileClass(ProjectileClass): def __init__(self, ID,", "possibleActions(self, State): \"\"\" Identifies the set of feasible actions given", "# Will be used as the projectile for the missile", "the feasible actions \"\"\" return self.ActionOptions def observe(self, Unit): if", "ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position, memo)", "of executed `Actions`. \"\"\" NewState = deepcopy(State) Changes = []", "Class This is a subclass to the UnitClass Virtual Functions", "feasible actions given the board size and position of the", "y in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State):", "number of turns until the unit is dead \"\"\" def", "says VisibleRange: how far the unit can observe Actions: dict", "is attacking in an advance action RemaingLifetime: int that defines", "Virtual Functions ----------------- - `__copy__()` to make shallow copies -", "1 if isinstance(ActionResult, list): Changes += ActionResult else: Changes.append(ActionResult) return", "Extent=(1,1)) self.Actions = { \"advance\": lambda x: advance(self, x) }", "Duplicate def __deepcopy__(self, memo): Default = None Exists = memo.get(self,", "can observe Actions: dict dictionary of actions common accross all", "( \"advance\", ), ) self.Attack = None self.RemainingLifetime = RemainingLifetime", "`Actions` on `State`. Parameters ---------- Actions : list[str] A set", "Exists = memo.get(self, Default) if Exists is not Default: return", "State: StateClass Returns ------- TrueActions: list[str] A list of the", "Duplicate def possibleActions(self, State): \"\"\" Identifies the set of feasible", "projectile for the missile launcher unit class MissileClass(ProjectileClass): def __init__(self,", "\"advance\", ), ) self.Attack = None self.RemainingLifetime = RemainingLifetime def", "legal actions - `observe(Unit)` to observe units located within VisibleRange", "StateClass Returns ------- TrueActions: list[str] A list of the feasible", "copies - `possibleActions(State)` to identify legal actions - `observe(Unit)` to", "on `State`. State : StateClass State on which to inflict", "Parameters ---------- State: StateClass Returns ------- TrueActions: list[str] A list", "y in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y,", "Parameters ---------- Actions : list[str] A set of actions to", "set of feasible actions given the board size and position", ": list Resulting state of executed `Actions`. 
\"\"\" NewState =", "deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate return Duplicate def possibleActions(self, State):", "with another unit - `execute(Action, State)` to execute the action", "x: advance(self, x) } self.ActionOptions = ( ( \"advance\", ),", "= None Exists = memo.get(self, Default) if Exists is not", "memo) memo[self] = Duplicate return Duplicate def possibleActions(self, State): \"\"\"", "self.Owner, self.Health) Duplicate.Position = self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack =", "import UnitClass, advance from copy import deepcopy import math class", "Projectile Class This is a subclass to the UnitClass Virtual", "whether the unit is attacking in an advance action RemaingLifetime:", "list Resulting state of executed `Actions`. \"\"\" NewState = deepcopy(State)", "launcher unit class MissileClass(ProjectileClass): def __init__(self, ID, Owner, Position, Life=1):", "of the unit Extent: the space occupied by unit Position:", "Default) if Exists is not Default: return Exists Duplicate =", "memo) Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self]", "Dec 15 09:49:47 2020 @author: james.z.hare \"\"\" from src.UnitModule import", "= self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate", "deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack, memo)", "int that defines whether the unit is attacking in an", "advance(self, x) } self.ActionOptions = ( ( \"advance\", ), )", "to make shallow copies - `__deepcopy__(memo)` to make deep copies", "Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate def __deepcopy__(self, memo): Default =", "for y in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions,", "src.UnitModule import UnitClass, advance from copy import deepcopy import math", "make deep copies - `possibleActions(State)` to identify legal actions -", "isinstance(ActionResult, list): Changes += ActionResult else: Changes.append(ActionResult) return Changes #", "unit can observe Actions: dict dictionary of actions common accross", "list): Changes += ActionResult else: Changes.append(ActionResult) return Changes # Will", "`__copy__()` to make shallow copies - `__deepcopy__(memo)` to make deep", "- `__deepcopy__(memo)` to make deep copies - `possibleActions(State)` to identify", "actions to be performed on `State`. State : StateClass State", "as the name says VisibleRange: how far the unit can", "`__deepcopy__(memo)` to make deep copies - `possibleActions(State)` to identify legal", "set of actions to be performed on `State`. State :", "observe units located within VisibleRange - `overlaps(Unit)` to identify if", "def observe(self, Unit): if Unit.ID == self.ID: return Unit return", "\"\"\" def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner,", "= Duplicate return Duplicate def possibleActions(self, State): \"\"\" Identifies the", "`Actions`. \"\"\" NewState = deepcopy(State) Changes = [] for Action", "the missile launcher unit class MissileClass(ProjectileClass): def __init__(self, ID, Owner,", "range(self.Extent[0]) for y in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace = set([", "list of list of action options. 
Attack: int that defines", "the unit Extent: the space occupied by unit Position: location", "__init__(self, ID, Owner, Position, Life=1): ProjectileClass.__init__(self, ID, Owner, Positon=Position, Life=Life)", "Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1)) self.Actions = {", "\"\"\" Created on Tue Dec 15 09:49:47 2020 @author: james.z.hare", "State : StateClass State on which to inflict actions. Returns", "the player the unit belongs to Health: the health of", "= set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for", "- `possibleActions(State)` to identify legal actions - `observe(Unit)` to observe", "Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1)) self.Actions =", "ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1)) self.Actions", "---------- Actions : list[str] A set of actions to be", "copies - `__deepcopy__(memo)` to make deep copies - `possibleActions(State)` to", "+= ActionResult else: Changes.append(ActionResult) return Changes # Will be used", "to identify if the unit overlaps with another unit -", "self.ActionOptions = ( ( \"advance\", ), ) self.Attack = None", "= deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate return Duplicate def possibleActions(self,", "turns until the unit is dead \"\"\" def __init__(self, ID,", "how far the unit can observe Actions: dict dictionary of", "memo.get(self, Default) if Exists is not Default: return Exists Duplicate", "(self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y in", "the unit can observe Actions: dict dictionary of actions common", "list of the feasible actions \"\"\" return self.ActionOptions def observe(self,", ": StateClass State on which to inflict actions. Returns -------", "= self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime =", "Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate return Duplicate def", "to Health: the health of the unit Extent: the space", "= { \"advance\": lambda x: advance(self, x) } self.ActionOptions =", "occupied by unit Position: location of unit Orientation: as the", "be performed on `State`. State : StateClass State on which", "in Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult,", "TrueActions: list[str] A list of the feasible actions \"\"\" return", "deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate return", "action RemaingLifetime: int that defines the total number of turns", "if Exists is not Default: return Exists Duplicate = ProjectileClass(deepcopy(self.ID,", "def __copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position = self.Position", "StateClass State on which to inflict actions. Returns ------- Changes", "Resulting state of executed `Actions`. \"\"\" NewState = deepcopy(State) Changes", "class ProjectileClass(UnitClass): \"\"\" The Projectile Class This is a subclass", "= [] for Action in Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime", "execute(self, Actions, State): \"\"\" Execute `Actions` on `State`. 
Parameters ----------", "---------- ID: a unique identifier of this unit Owner: the", "Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult, list):", "-= 1 if isinstance(ActionResult, list): Changes += ActionResult else: Changes.append(ActionResult)", "memo) Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime", "int that defines the total number of turns until the", "used as the projectile for the missile launcher unit class", "unit Extent: the space occupied by unit Position: location of", "return None def overlaps(self, Unit): MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y,", "list of action options. Attack: int that defines whether the", "unit overlaps with another unit - `execute(Action, State)` to execute", "Duplicate.Attack = self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate def __deepcopy__(self,", "on which to inflict actions. Returns ------- Changes : list", "- `observe(Unit)` to observe units located within VisibleRange - `overlaps(Unit)`", "executed `Actions`. \"\"\" NewState = deepcopy(State) Changes = [] for", "feasible actions \"\"\" return self.ActionOptions def observe(self, Unit): if Unit.ID", "= None self.RemainingLifetime = RemainingLifetime def __copy__(self): Duplicate = ProjectileClass(self.ID,", "import math class ProjectileClass(UnitClass): \"\"\" The Projectile Class This is", "memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate return Duplicate", "def overlaps(self, Unit): MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for", ": list[str] A set of actions to be performed on", "the unit is attacking in an advance action RemaingLifetime: int", "Health, Extent=(1,1)) self.Actions = { \"advance\": lambda x: advance(self, x)", "[] for Action in Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -=", "self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate def __deepcopy__(self, memo): Default", "Returns ------- Changes : list Resulting state of executed `Actions`.", "Default = None Exists = memo.get(self, Default) if Exists is", "Attack: int that defines whether the unit is attacking in", "= ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position,", "identifier of this unit Owner: the player the unit belongs", "the UnitClass Virtual Functions ----------------- - `__copy__()` to make shallow", "unit class MissileClass(ProjectileClass): def __init__(self, ID, Owner, Position, Life=1): ProjectileClass.__init__(self,", "of actions to be performed on `State`. 
State : StateClass", "deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation =", "ProjectileClass(UnitClass): \"\"\" The Projectile Class This is a subclass to", "MissileClass(ProjectileClass): def __init__(self, ID, Owner, Position, Life=1): ProjectileClass.__init__(self, ID, Owner,", "in range(self.Extent[0]) for y in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace =", "memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation", "self.ActionOptions def observe(self, Unit): if Unit.ID == self.ID: return Unit", "if isinstance(ActionResult, list): Changes += ActionResult else: Changes.append(ActionResult) return Changes", "return Changes # Will be used as the projectile for", "Extent: the space occupied by unit Position: location of unit", "dictionary of actions common accross all units ActionOptions: list of", "for Action in Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1", "09:49:47 2020 @author: james.z.hare \"\"\" from src.UnitModule import UnitClass, advance", "Health: the health of the unit Extent: the space occupied", "Actions : list[str] A set of actions to be performed", "board size and position of the unit Parameters ---------- State:", "the space occupied by unit Position: location of unit Orientation:", "\"\"\" return self.ActionOptions def observe(self, Unit): if Unit.ID == self.ID:", "Changes += ActionResult else: Changes.append(ActionResult) return Changes # Will be", "given the board size and position of the unit Parameters", "= ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position = self.Position Duplicate.Orientation = self.Orientation", ",memo), deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation,", "range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self,", "VisibleRange: how far the unit can observe Actions: dict dictionary", "ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult, list): Changes", "self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult, list): Changes += ActionResult", "dict dictionary of actions common accross all units ActionOptions: list", "range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for", "of action options. Attack: int that defines whether the unit", "Exists is not Default: return Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo),", "lambda x: advance(self, x) } self.ActionOptions = ( ( \"advance\",", "the unit belongs to Health: the health of the unit", "observe Actions: dict dictionary of actions common accross all units", "UnitClass, advance from copy import deepcopy import math class ProjectileClass(UnitClass):", "which to inflict actions. 
Returns ------- Changes : list Resulting", "VisibleRange - `overlaps(Unit)` to identify if the unit overlaps with", "of unit Orientation: as the name says VisibleRange: how far", "Will be used as the projectile for the missile launcher", "a subclass to the UnitClass Virtual Functions ----------------- - `__copy__()`", "unique identifier of this unit Owner: the player the unit", "= ( ( \"advance\", ), ) self.Attack = None self.RemainingLifetime", "not Default: return Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo),", "that defines the total number of turns until the unit", "\"\"\" from src.UnitModule import UnitClass, advance from copy import deepcopy", "of the feasible actions \"\"\" return self.ActionOptions def observe(self, Unit):", "------- TrueActions: list[str] A list of the feasible actions \"\"\"", "be used as the projectile for the missile launcher unit", "actions common accross all units ActionOptions: list of list of", "missile launcher unit class MissileClass(ProjectileClass): def __init__(self, ID, Owner, Position,", "Unit return None def overlaps(self, Unit): MyOccupiedSpace = set([ (self.Position[0]+x,", "def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health,", "----------------- - `__copy__()` to make shallow copies - `__deepcopy__(memo)` to", "actions - `observe(Unit)` to observe units located within VisibleRange -", "-*- coding: utf-8 -*- \"\"\" Created on Tue Dec 15", "A list of the feasible actions \"\"\" return self.ActionOptions def", "= RemainingLifetime def __copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position", "} self.ActionOptions = ( ( \"advance\", ), ) self.Attack =", "x) } self.ActionOptions = ( ( \"advance\", ), ) self.Attack", "\"\"\" Identifies the set of feasible actions given the board", "deepcopy import math class ProjectileClass(UnitClass): \"\"\" The Projectile Class This", "Action in Actions: ActionResult = self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if", "ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult, list): Changes += ActionResult else:", "__deepcopy__(self, memo): Default = None Exists = memo.get(self, Default) if", "Tue Dec 15 09:49:47 2020 @author: james.z.hare \"\"\" from src.UnitModule", "utf-8 -*- \"\"\" Created on Tue Dec 15 09:49:47 2020", "deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo)", "`observe(Unit)` to observe units located within VisibleRange - `overlaps(Unit)` to", "from copy import deepcopy import math class ProjectileClass(UnitClass): \"\"\" The", "on Tue Dec 15 09:49:47 2020 @author: james.z.hare \"\"\" from", "performed on `State`. 
State : StateClass State on which to", "to make deep copies - `possibleActions(State)` to identify legal actions", "\"\"\" NewState = deepcopy(State) Changes = [] for Action in", "is dead \"\"\" def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self,", "belongs to Health: the health of the unit Extent: the", "if Unit.ID == self.ID: return Unit return None def overlaps(self,", "State): \"\"\" Identifies the set of feasible actions given the", "Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self] =", "overlaps(self, Unit): MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x", "far the unit can observe Actions: dict dictionary of actions", "( ( \"advance\", ), ) self.Attack = None self.RemainingLifetime =", "`execute(Action, State)` to execute the action Attributes ---------- ID: a", "return Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo))", "return Unit return None def overlaps(self, Unit): MyOccupiedSpace = set([", "self.ID: return Unit return None def overlaps(self, Unit): MyOccupiedSpace =", "identify if the unit overlaps with another unit - `execute(Action,", "------- Changes : list Resulting state of executed `Actions`. \"\"\"", "memo)) Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack", "- `__copy__()` to make shallow copies - `__deepcopy__(memo)` to make", "return Duplicate def possibleActions(self, State): \"\"\" Identifies the set of", "health of the unit Extent: the space occupied by unit", "`overlaps(Unit)` to identify if the unit overlaps with another unit", "space occupied by unit Position: location of unit Orientation: as", "unit is attacking in an advance action RemaingLifetime: int that", "list[str] A list of the feasible actions \"\"\" return self.ActionOptions", "__copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position = self.Position Duplicate.Orientation", "accross all units ActionOptions: list of list of action options.", "for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ]) return", "ActionOptions: list of list of action options. Attack: int that", "else: Changes.append(ActionResult) return Changes # Will be used as the", "unit Orientation: as the name says VisibleRange: how far the", "RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1)) self.Actions = { \"advance\":", "unit Position: location of unit Orientation: as the name says", "this unit Owner: the player the unit belongs to Health:", "to identify legal actions - `observe(Unit)` to observe units located", "@author: james.z.hare \"\"\" from src.UnitModule import UnitClass, advance from copy", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Dec", "= deepcopy(State) Changes = [] for Action in Actions: ActionResult", "Created on Tue Dec 15 09:49:47 2020 @author: james.z.hare \"\"\"", "of this unit Owner: the player the unit belongs to", "RemainingLifetime def __copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position =", "len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State): \"\"\" Execute `Actions` on `State`.", "of the unit Parameters ---------- State: StateClass Returns ------- TrueActions:", "inflict actions. 
Returns ------- Changes : list Resulting state of", "Unit.Position[2]) for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ])", "identify legal actions - `observe(Unit)` to observe units located within", "Attributes ---------- ID: a unique identifier of this unit Owner:", "the total number of turns until the unit is dead", "Identifies the set of feasible actions given the board size", "in an advance action RemaingLifetime: int that defines the total", "self.Attack = None self.RemainingLifetime = RemainingLifetime def __copy__(self): Duplicate =", "in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2])", ") self.Attack = None self.RemainingLifetime = RemainingLifetime def __copy__(self): Duplicate", "self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime", "= set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for", "set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y", "x in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0", "to the UnitClass Virtual Functions ----------------- - `__copy__()` to make", "units located within VisibleRange - `overlaps(Unit)` to identify if the", "]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State): \"\"\" Execute `Actions`", "coding: utf-8 -*- \"\"\" Created on Tue Dec 15 09:49:47", "= self.Actions[Action](NewState) ActionResult[1].RemainingLifetime -= 1 if isinstance(ActionResult, list): Changes +=", "-*- \"\"\" Created on Tue Dec 15 09:49:47 2020 @author:", "to execute the action Attributes ---------- ID: a unique identifier", "the name says VisibleRange: how far the unit can observe", "the unit overlaps with another unit - `execute(Action, State)` to", "actions. Returns ------- Changes : list Resulting state of executed", "is a subclass to the UnitClass Virtual Functions ----------------- -", "unit Parameters ---------- State: StateClass Returns ------- TrueActions: list[str] A", "ID, Owner, Health, Extent=(1,1)) self.Actions = { \"advance\": lambda x:", "dead \"\"\" def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID,", "== self.ID: return Unit return None def overlaps(self, Unit): MyOccupiedSpace", "the projectile for the missile launcher unit class MissileClass(ProjectileClass): def", "self.Actions = { \"advance\": lambda x: advance(self, x) } self.ActionOptions", "Changes.append(ActionResult) return Changes # Will be used as the projectile", "self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y in range(self.Extent[1])", "for y in range(self.Extent[1]) ]) #print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x,", "common accross all units ActionOptions: list of list of action", "Duplicate.Orientation = self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return", "memo[self] = Duplicate return Duplicate def possibleActions(self, State): \"\"\" Identifies", "= self.RemainingLifetime return Duplicate def __deepcopy__(self, memo): Default = None", "NewState = deepcopy(State) Changes = [] for Action in Actions:", "- `overlaps(Unit)` to identify if the unit overlaps with another", "options. 
Attack: int that defines whether the unit is attacking", "attacking in an advance action RemaingLifetime: int that defines the", "overlaps with another unit - `execute(Action, State)` to execute the", "= deepcopy(self.Attack, memo) Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo) memo[self] = Duplicate", "UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1)) self.Actions = { \"advance\": lambda", "Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position", "self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate def", "until the unit is dead \"\"\" def __init__(self, ID, Owner,", "that defines whether the unit is attacking in an advance", "name says VisibleRange: how far the unit can observe Actions:", "TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0])", "units ActionOptions: list of list of action options. Attack: int", "Changes : list Resulting state of executed `Actions`. \"\"\" NewState", "of feasible actions given the board size and position of", "located within VisibleRange - `overlaps(Unit)` to identify if the unit", "is not Default: return Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner", "Owner: the player the unit belongs to Health: the health", "def possibleActions(self, State): \"\"\" Identifies the set of feasible actions", "all units ActionOptions: list of list of action options. Attack:", "of list of action options. Attack: int that defines whether", "within VisibleRange - `overlaps(Unit)` to identify if the unit overlaps", "Position: location of unit Orientation: as the name says VisibleRange:", "unit Owner: the player the unit belongs to Health: the", "of turns until the unit is dead \"\"\" def __init__(self,", "size and position of the unit Parameters ---------- State: StateClass", "Actions: dict dictionary of actions common accross all units ActionOptions:", "defines the total number of turns until the unit is", "unit - `execute(Action, State)` to execute the action Attributes ----------", "Changes # Will be used as the projectile for the", "set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0]) for y", "memo): Default = None Exists = memo.get(self, Default) if Exists", "---------- State: StateClass Returns ------- TrueActions: list[str] A list of", "in range(Unit.Extent[0]) for y in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def", "as the projectile for the missile launcher unit class MissileClass(ProjectileClass):", "advance action RemaingLifetime: int that defines the total number of", "`possibleActions(State)` to identify legal actions - `observe(Unit)` to observe units", "by unit Position: location of unit Orientation: as the name", "shallow copies - `__deepcopy__(memo)` to make deep copies - `possibleActions(State)`", "\"advance\": lambda x: advance(self, x) } self.ActionOptions = ( (", "class MissileClass(ProjectileClass): def __init__(self, ID, Owner, Position, Life=1): ProjectileClass.__init__(self, ID,", "deepcopy(self.Health, memo)) Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation, memo)", "MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in range(self.Extent[0])", "Unit): if Unit.ID == self.ID: return Unit return None def", 
"and position of the unit Parameters ---------- State: StateClass Returns", "def __init__(self, ID, Owner, Position, Life=1): ProjectileClass.__init__(self, ID, Owner, Positon=Position,", "None Exists = memo.get(self, Default) if Exists is not Default:", "(Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y in", "defines whether the unit is attacking in an advance action", "Unit): MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2]) for x in", "Default: return Exists Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health,", "to inflict actions. Returns ------- Changes : list Resulting state", "of actions common accross all units ActionOptions: list of list", "deep copies - `possibleActions(State)` to identify legal actions - `observe(Unit)`", "player the unit belongs to Health: the health of the", "= self.Attack Duplicate.RemainingLifetime = self.RemainingLifetime return Duplicate def __deepcopy__(self, memo):", "execute the action Attributes ---------- ID: a unique identifier of", "another unit - `execute(Action, State)` to execute the action Attributes", "A set of actions to be performed on `State`. State", "observe(self, Unit): if Unit.ID == self.ID: return Unit return None", "position of the unit Parameters ---------- State: StateClass Returns -------", "an advance action RemaingLifetime: int that defines the total number", "Duplicate = ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position = self.Position Duplicate.Orientation =", "unit belongs to Health: the health of the unit Extent:", "in range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State): \"\"\"", "action options. Attack: int that defines whether the unit is", "The Projectile Class This is a subclass to the UnitClass", "the set of feasible actions given the board size and", "This is a subclass to the UnitClass Virtual Functions -----------------", "__init__(self, ID, Owner, Health, RemainingLifetime=math.inf): UnitClass.__init__(self, ID, Owner, Health, Extent=(1,1))", "- `execute(Action, State)` to execute the action Attributes ---------- ID:", "\"\"\" The Projectile Class This is a subclass to the", "if the unit overlaps with another unit - `execute(Action, State)`", "unit is dead \"\"\" def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf):", "Unit.Position[1]+y, Unit.Position[2]) for x in range(Unit.Extent[0]) for y in range(Unit.Extent[1])", "subclass to the UnitClass Virtual Functions ----------------- - `__copy__()` to", "the health of the unit Extent: the space occupied by", "make shallow copies - `__deepcopy__(memo)` to make deep copies -", "total number of turns until the unit is dead \"\"\"", "`State`. State : StateClass State on which to inflict actions.", "state of executed `Actions`. \"\"\" NewState = deepcopy(State) Changes =", "self.RemainingLifetime = RemainingLifetime def __copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner, self.Health)", "\"\"\" Execute `Actions` on `State`. Parameters ---------- Actions : list[str]", "self.Position[2]) for x in range(self.Extent[0]) for y in range(self.Extent[1]) ])", "Owner, Health, Extent=(1,1)) self.Actions = { \"advance\": lambda x: advance(self,", "`State`. 
Parameters ---------- Actions : list[str] A set of actions", "copy import deepcopy import math class ProjectileClass(UnitClass): \"\"\" The Projectile", "State)` to execute the action Attributes ---------- ID: a unique", "ProjectileClass(self.ID, self.Owner, self.Health) Duplicate.Position = self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack", "from src.UnitModule import UnitClass, advance from copy import deepcopy import", "the board size and position of the unit Parameters ----------", "), ) self.Attack = None self.RemainingLifetime = RemainingLifetime def __copy__(self):", "advance from copy import deepcopy import math class ProjectileClass(UnitClass): \"\"\"", "= deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack,", "None def overlaps(self, Unit): MyOccupiedSpace = set([ (self.Position[0]+x, self.Position[1]+y, self.Position[2])", "ActionResult else: Changes.append(ActionResult) return Changes # Will be used as", "the unit Parameters ---------- State: StateClass Returns ------- TrueActions: list[str]", "return self.ActionOptions def observe(self, Unit): if Unit.ID == self.ID: return", "ID: a unique identifier of this unit Owner: the player", "to be performed on `State`. State : StateClass State on", "State on which to inflict actions. Returns ------- Changes :", "james.z.hare \"\"\" from src.UnitModule import UnitClass, advance from copy import", "Orientation: as the name says VisibleRange: how far the unit", "import deepcopy import math class ProjectileClass(UnitClass): \"\"\" The Projectile Class", "the action Attributes ---------- ID: a unique identifier of this", "Returns ------- TrueActions: list[str] A list of the feasible actions", "action Attributes ---------- ID: a unique identifier of this unit", "range(Unit.Extent[1]) ]) return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State): \"\"\" Execute", "Unit.ID == self.ID: return Unit return None def overlaps(self, Unit):", "on `State`. Parameters ---------- Actions : list[str] A set of", "Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack = deepcopy(self.Attack, memo) Duplicate.RemainingLifetime =", "Duplicate return Duplicate def possibleActions(self, State): \"\"\" Identifies the set", "def execute(self, Actions, State): \"\"\" Execute `Actions` on `State`. Parameters", "2020 @author: james.z.hare \"\"\" from src.UnitModule import UnitClass, advance from", "actions \"\"\" return self.ActionOptions def observe(self, Unit): if Unit.ID ==", "15 09:49:47 2020 @author: james.z.hare \"\"\" from src.UnitModule import UnitClass,", "Duplicate.Position = self.Position Duplicate.Orientation = self.Orientation Duplicate.Attack = self.Attack Duplicate.RemainingLifetime", "Changes = [] for Action in Actions: ActionResult = self.Actions[Action](NewState)", "to observe units located within VisibleRange - `overlaps(Unit)` to identify", "]) #print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x", "def __deepcopy__(self, memo): Default = None Exists = memo.get(self, Default)", "Functions ----------------- - `__copy__()` to make shallow copies - `__deepcopy__(memo)`", "Execute `Actions` on `State`. 
Parameters ---------- Actions : list[str] A", "Duplicate.Position = deepcopy(self.Position, memo) Duplicate.Orientation = deepcopy(self.Orientation, memo) Duplicate.Attack =", "#print(Unit) TheirOccupiedSpace = set([ (Unit.Position[0]+x, Unit.Position[1]+y, Unit.Position[2]) for x in", "return len(MyOccupiedSpace.intersection(TheirOccupiedSpace))>0 def execute(self, Actions, State): \"\"\" Execute `Actions` on", "self.RemainingLifetime return Duplicate def __deepcopy__(self, memo): Default = None Exists", "actions given the board size and position of the unit", "deepcopy(State) Changes = [] for Action in Actions: ActionResult =", "{ \"advance\": lambda x: advance(self, x) } self.ActionOptions = (", "Duplicate = ProjectileClass(deepcopy(self.ID, memo), deepcopy(self.Owner ,memo), deepcopy(self.Health, memo)) Duplicate.Position =", "for x in range(self.Extent[0]) for y in range(self.Extent[1]) ]) #print(Unit)", "the unit is dead \"\"\" def __init__(self, ID, Owner, Health,", "UnitClass Virtual Functions ----------------- - `__copy__()` to make shallow copies", "math class ProjectileClass(UnitClass): \"\"\" The Projectile Class This is a", "list[str] A set of actions to be performed on `State`.", "RemaingLifetime: int that defines the total number of turns until", "= memo.get(self, Default) if Exists is not Default: return Exists", "location of unit Orientation: as the name says VisibleRange: how", "State): \"\"\" Execute `Actions` on `State`. Parameters ---------- Actions :", "a unique identifier of this unit Owner: the player the", "None self.RemainingLifetime = RemainingLifetime def __copy__(self): Duplicate = ProjectileClass(self.ID, self.Owner," ]
[ "print(car1) print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000) car2 = Car(\"Toyota\",\"Vios\",400000) car3 =", "import * def compare(car1,car2): print(car1) print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000) car2", "* def compare(car1,car2): print(car1) print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000) car2 =", "= Car(\"Nissan\",\"Tiida\",450000) car2 = Car(\"Toyota\",\"Vios\",400000) car3 = Car(\"BMW\",\"X3\",3400000) compare(car3,car1) compare(car1,car2)", "compare(car1,car2): print(car1) print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000) car2 = Car(\"Toyota\",\"Vios\",400000) car3", "car import * def compare(car1,car2): print(car1) print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000)", "print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000) car2 = Car(\"Toyota\",\"Vios\",400000) car3 = Car(\"BMW\",\"X3\",3400000)", "from car import * def compare(car1,car2): print(car1) print(car2) car1 =", "car1 = Car(\"Nissan\",\"Tiida\",450000) car2 = Car(\"Toyota\",\"Vios\",400000) car3 = Car(\"BMW\",\"X3\",3400000) compare(car3,car1)", "def compare(car1,car2): print(car1) print(car2) car1 = Car(\"Nissan\",\"Tiida\",450000) car2 = Car(\"Toyota\",\"Vios\",400000)" ]
[ "class Maybe(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls, val): return", "pass @monad_eq class Just(Maybe): def __init__(self, val): self.__val = val", "import Monad from prelude.decorators import monad_eq, singleton @monad_eq class Either(Monad):", "ABCMeta, abstractmethod from prelude.typeclasses import Monad from prelude.decorators import monad_eq,", "def __iter__(self): yield self.__val def __repr__(self): return \"Right({})\".format(self.__val) class Maybe(Monad):", "\"Left({})\".format(self.__val) class Right(Either): def __init__(self, val): self.__val = val def", "@abstractmethod def __iter__(self): pass @monad_eq class Just(Maybe): def __init__(self, val):", "= val def __rshift__(self, f): return f(self.__val) def __iter__(self): yield", "def __init__(self, val): self.__val = val def __rshift__(self, f): return", "__metaclass__ = ABCMeta @classmethod def mreturn(cls, val): return Just(val) @abstractmethod", "ABCMeta @classmethod def mreturn(cls, val): return Right(val) @abstractmethod def __iter__(self):", "val def __rshift__(self, f): return self def __iter__(self): return iter([])", "mreturn(cls, val): return Just(val) @abstractmethod def __iter__(self): pass @monad_eq class", "Maybe(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls, val): return Just(val)", "return f(self.__val) def __iter__(self): yield self.__val def __repr__(self): return \"Just({})\".format(self.__val)", "__rshift__(self, f): return f(self.__val) def __iter__(self): yield self.__val def __repr__(self):", "from prelude.typeclasses import Monad from prelude.decorators import monad_eq, singleton @monad_eq", "singleton @monad_eq class Either(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls,", "def __repr__(self): return \"Just({})\".format(self.__val) @singleton class Nothing(Maybe): def __rshift__(self, f):", "__iter__(self): yield self.__val def __repr__(self): return \"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__", "def __iter__(self): return iter([]) def __eq__(self, other): return type(self) ==", "self.__val = val def __rshift__(self, f): return f(self.__val) def __iter__(self):", "Either(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls, val): return Right(val)", "mreturn(cls, val): return Right(val) @abstractmethod def __iter__(self): pass class Left(Either):", "@monad_eq class Just(Maybe): def __init__(self, val): self.__val = val def", "other): return type(self) == type(other) def __repr__(self): return \"Left({})\".format(self.__val) class", "@monad_eq class Either(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls, val):", "yield self.__val def __repr__(self): return \"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__ =", "= ABCMeta @classmethod def mreturn(cls, val): return Right(val) @abstractmethod def", "return iter([]) def __eq__(self, other): return type(self) == type(other) def", "= ABCMeta @classmethod def mreturn(cls, val): return Just(val) @abstractmethod def", "Just(val) @abstractmethod def __iter__(self): pass @monad_eq class Just(Maybe): def __init__(self,", "class Right(Either): def __init__(self, val): self.__val = val def __rshift__(self,", "def __iter__(self): pass @monad_eq class Just(Maybe): def __init__(self, val): self.__val", "def __repr__(self): return \"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__ = ABCMeta @classmethod", "abc import ABCMeta, abstractmethod from prelude.typeclasses import Monad from prelude.decorators", "Right(val) @abstractmethod def 
__iter__(self): pass class Left(Either): def __init__(self, val):", "pass class Left(Either): def __init__(self, val): self.__val = val def", "return \"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls,", "self def __iter__(self): return iter([]) def __eq__(self, other): return type(self)", "return self def __iter__(self): return iter([]) def __repr__(self): return \"Nothing()\"", "self.__val = val def __rshift__(self, f): return self def __iter__(self):", "from abc import ABCMeta, abstractmethod from prelude.typeclasses import Monad from", "Left(Either): def __init__(self, val): self.__val = val def __rshift__(self, f):", "__repr__(self): return \"Left({})\".format(self.__val) class Right(Either): def __init__(self, val): self.__val =", "return \"Left({})\".format(self.__val) class Right(Either): def __init__(self, val): self.__val = val", "return self def __iter__(self): return iter([]) def __eq__(self, other): return", "@singleton class Nothing(Maybe): def __rshift__(self, f): return self def __iter__(self):", "def __rshift__(self, f): return self def __iter__(self): return iter([]) def", "def __eq__(self, other): return type(self) == type(other) def __repr__(self): return", "yield self.__val def __repr__(self): return \"Just({})\".format(self.__val) @singleton class Nothing(Maybe): def", "class Just(Maybe): def __init__(self, val): self.__val = val def __rshift__(self,", "def mreturn(cls, val): return Right(val) @abstractmethod def __iter__(self): pass class", "return Right(val) @abstractmethod def __iter__(self): pass class Left(Either): def __init__(self,", "__init__(self, val): self.__val = val def __rshift__(self, f): return f(self.__val)", "val def __rshift__(self, f): return f(self.__val) def __iter__(self): yield self.__val", "import monad_eq, singleton @monad_eq class Either(Monad): __metaclass__ = ABCMeta @classmethod", "Right(Either): def __init__(self, val): self.__val = val def __rshift__(self, f):", "__rshift__(self, f): return self def __iter__(self): return iter([]) def __eq__(self,", "type(self) == type(other) def __repr__(self): return \"Left({})\".format(self.__val) class Right(Either): def", "def __iter__(self): pass class Left(Either): def __init__(self, val): self.__val =", "return \"Just({})\".format(self.__val) @singleton class Nothing(Maybe): def __rshift__(self, f): return self", "def __iter__(self): yield self.__val def __repr__(self): return \"Just({})\".format(self.__val) @singleton class", "ABCMeta @classmethod def mreturn(cls, val): return Just(val) @abstractmethod def __iter__(self):", "== type(other) def __repr__(self): return \"Left({})\".format(self.__val) class Right(Either): def __init__(self,", "__iter__(self): yield self.__val def __repr__(self): return \"Just({})\".format(self.__val) @singleton class Nothing(Maybe):", "__repr__(self): return \"Just({})\".format(self.__val) @singleton class Nothing(Maybe): def __rshift__(self, f): return", "prelude.decorators import monad_eq, singleton @monad_eq class Either(Monad): __metaclass__ = ABCMeta", "Just(Maybe): def __init__(self, val): self.__val = val def __rshift__(self, f):", "f): return self def __iter__(self): return iter([]) def __eq__(self, other):", "return Just(val) @abstractmethod def __iter__(self): pass @monad_eq class Just(Maybe): def", "self.__val def __repr__(self): return \"Just({})\".format(self.__val) @singleton class Nothing(Maybe): def __rshift__(self,", "f): return self def __iter__(self): return iter([]) def __repr__(self): 
return", "Monad from prelude.decorators import monad_eq, singleton @monad_eq class Either(Monad): __metaclass__", "class Left(Either): def __init__(self, val): self.__val = val def __rshift__(self,", "def mreturn(cls, val): return Just(val) @abstractmethod def __iter__(self): pass @monad_eq", "__rshift__(self, f): return self def __iter__(self): return iter([]) def __repr__(self):", "f(self.__val) def __iter__(self): yield self.__val def __repr__(self): return \"Right({})\".format(self.__val) class", "\"Just({})\".format(self.__val) @singleton class Nothing(Maybe): def __rshift__(self, f): return self def", "val): return Right(val) @abstractmethod def __iter__(self): pass class Left(Either): def", "self.__val def __repr__(self): return \"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__ = ABCMeta", "__iter__(self): return iter([]) def __eq__(self, other): return type(self) == type(other)", "@classmethod def mreturn(cls, val): return Just(val) @abstractmethod def __iter__(self): pass", "class Either(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls, val): return", "__metaclass__ = ABCMeta @classmethod def mreturn(cls, val): return Right(val) @abstractmethod", "val): return Just(val) @abstractmethod def __iter__(self): pass @monad_eq class Just(Maybe):", "= val def __rshift__(self, f): return self def __iter__(self): return", "def __rshift__(self, f): return f(self.__val) def __iter__(self): yield self.__val def", "return type(self) == type(other) def __repr__(self): return \"Left({})\".format(self.__val) class Right(Either):", "def __repr__(self): return \"Left({})\".format(self.__val) class Right(Either): def __init__(self, val): self.__val", "type(other) def __repr__(self): return \"Left({})\".format(self.__val) class Right(Either): def __init__(self, val):", "Nothing(Maybe): def __rshift__(self, f): return self def __iter__(self): return iter([])", "f): return f(self.__val) def __iter__(self): yield self.__val def __repr__(self): return", "val): self.__val = val def __rshift__(self, f): return self def", "abstractmethod from prelude.typeclasses import Monad from prelude.decorators import monad_eq, singleton", "__init__(self, val): self.__val = val def __rshift__(self, f): return self", "__eq__(self, other): return type(self) == type(other) def __repr__(self): return \"Left({})\".format(self.__val)", "import ABCMeta, abstractmethod from prelude.typeclasses import Monad from prelude.decorators import", "prelude.typeclasses import Monad from prelude.decorators import monad_eq, singleton @monad_eq class", "@abstractmethod def __iter__(self): pass class Left(Either): def __init__(self, val): self.__val", "f(self.__val) def __iter__(self): yield self.__val def __repr__(self): return \"Just({})\".format(self.__val) @singleton", "__iter__(self): pass class Left(Either): def __init__(self, val): self.__val = val", "\"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__ = ABCMeta @classmethod def mreturn(cls, val):", "@classmethod def mreturn(cls, val): return Right(val) @abstractmethod def __iter__(self): pass", "monad_eq, singleton @monad_eq class Either(Monad): __metaclass__ = ABCMeta @classmethod def", "iter([]) def __eq__(self, other): return type(self) == type(other) def __repr__(self):", "return f(self.__val) def __iter__(self): yield self.__val def __repr__(self): return \"Right({})\".format(self.__val)", "from prelude.decorators import monad_eq, singleton @monad_eq class Either(Monad): __metaclass__ =", "class Nothing(Maybe): def __rshift__(self, f): 
return self def __iter__(self): return", "__iter__(self): pass @monad_eq class Just(Maybe): def __init__(self, val): self.__val =", "val): self.__val = val def __rshift__(self, f): return f(self.__val) def", "__repr__(self): return \"Right({})\".format(self.__val) class Maybe(Monad): __metaclass__ = ABCMeta @classmethod def" ]
[ "self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try: cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")", "try: cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError as e: print(e)", "rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try: cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError", "= image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\" try: rospy.spin() except KeyboardInterrupt: print(\"Shutting", "try: rospy.spin() except KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows() \"\"\" if __name__", "CvBridgeError as e: print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def main(args): while", "#!/usr/bin/env python from __future__ import print_function import roslib roslib.load_manifest('msgs_to_cv2') import", "self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError as e: print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3)", "print_function import roslib roslib.load_manifest('msgs_to_cv2') import sys import rospy import cv2", "while True: ic = image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\" try: rospy.spin()", "ic = image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\" try: rospy.spin() except KeyboardInterrupt:", "sys import rospy import cv2 from std_msgs.msg import String from", "as e: print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def main(args): while True:", "sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError class image_converter:", "image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\" try: rospy.spin() except KeyboardInterrupt: print(\"Shutting down\")", "import rospy import cv2 from std_msgs.msg import String from sensor_msgs.msg", "\"bgr8\") except CvBridgeError as e: print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def", "= rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try: cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\") except", "print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def main(args): while True: ic =", "def callback(self,data): try: cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError as", "anonymous=True) \"\"\" try: rospy.spin() except KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows() \"\"\"", "except KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows() \"\"\" if __name__ == '__main__':", "python from __future__ import print_function import roslib roslib.load_manifest('msgs_to_cv2') import sys", "std_msgs.msg import String from sensor_msgs.msg import Image from cv_bridge import", "Image from cv_bridge import CvBridge, CvBridgeError class image_converter: def __init__(self):", "class image_converter: def __init__(self): self.bridge = CvBridge() self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback)", "rospy.init_node('image_converter', anonymous=True) \"\"\" try: rospy.spin() except KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows()", "= CvBridge() self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try: cv_image =", "\"\"\" try: rospy.spin() except KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows() \"\"\" if", "from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError class", "self.bridge = CvBridge() self.image_sub = 
rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try: cv_image", "cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def main(args): while True: ic = image_converter()", "roslib roslib.load_manifest('msgs_to_cv2') import sys import rospy import cv2 from std_msgs.msg", "rospy import cv2 from std_msgs.msg import String from sensor_msgs.msg import", "from std_msgs.msg import String from sensor_msgs.msg import Image from cv_bridge", "main(args): while True: ic = image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\" try:", "CvBridge, CvBridgeError class image_converter: def __init__(self): self.bridge = CvBridge() self.image_sub", "cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError as e: print(e) cv2.imshow(\"hola\",", "from __future__ import print_function import roslib roslib.load_manifest('msgs_to_cv2') import sys import", "from cv_bridge import CvBridge, CvBridgeError class image_converter: def __init__(self): self.bridge", "callback(self,data): try: cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError as e:", "__future__ import print_function import roslib roslib.load_manifest('msgs_to_cv2') import sys import rospy", "import Image from cv_bridge import CvBridge, CvBridgeError class image_converter: def", "import CvBridge, CvBridgeError class image_converter: def __init__(self): self.bridge = CvBridge()", "String from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError", "except CvBridgeError as e: print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def main(args):", "import roslib roslib.load_manifest('msgs_to_cv2') import sys import rospy import cv2 from", "cv_image) cv2.waitKey(3) def main(args): while True: ic = image_converter() rospy.init_node('image_converter',", "def __init__(self): self.bridge = CvBridge() self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data):", "cv2.waitKey(3) def main(args): while True: ic = image_converter() rospy.init_node('image_converter', anonymous=True)", "CvBridgeError class image_converter: def __init__(self): self.bridge = CvBridge() self.image_sub =", "<gh_stars>0 #!/usr/bin/env python from __future__ import print_function import roslib roslib.load_manifest('msgs_to_cv2')", "cv2 from std_msgs.msg import String from sensor_msgs.msg import Image from", "def main(args): while True: ic = image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\"", "True: ic = image_converter() rospy.init_node('image_converter', anonymous=True) \"\"\" try: rospy.spin() except", "= self.bridge.imgmsg_to_cv2(data, \"bgr8\") except CvBridgeError as e: print(e) cv2.imshow(\"hola\", cv_image)", "cv_bridge import CvBridge, CvBridgeError class image_converter: def __init__(self): self.bridge =", "roslib.load_manifest('msgs_to_cv2') import sys import rospy import cv2 from std_msgs.msg import", "KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows() \"\"\" if __name__ == '__main__': main(sys.argv)", "import cv2 from std_msgs.msg import String from sensor_msgs.msg import Image", "import String from sensor_msgs.msg import Image from cv_bridge import CvBridge,", "image_converter: def __init__(self): self.bridge = CvBridge() self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def", "rospy.spin() except KeyboardInterrupt: print(\"Shutting down\") cv2.destroyAllWindows() \"\"\" if __name__ ==", "import print_function import roslib roslib.load_manifest('msgs_to_cv2') 
import sys import rospy import", "e: print(e) cv2.imshow(\"hola\", cv_image) cv2.waitKey(3) def main(args): while True: ic", "__init__(self): self.bridge = CvBridge() self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try:", "import sys import rospy import cv2 from std_msgs.msg import String", "CvBridge() self.image_sub = rospy.Subscriber(\"/bebop/image_raw\",Image,self.callback) def callback(self,data): try: cv_image = self.bridge.imgmsg_to_cv2(data," ]
[ "MIT License along with # foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.", "a copy of the MIT License along with # foodx_devops_tools.", "# # This file is part of foodx_devops_tools. # #", "This file is part of foodx_devops_tools. # # You should", "foodx_devops_tools. # # You should have received a copy of", "of foodx_devops_tools. # # You should have received a copy", "<reponame>Food-X-Technologies/foodx_devops_tools<filename>foodx_devops_tools/azure/__init__.py # Copyright (c) 2021 Food-X Technologies # # This", "Copyright (c) 2021 Food-X Technologies # # This file is", "along with # foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>. \"\"\"Azure related", "should have received a copy of the MIT License along", "have received a copy of the MIT License along with", "part of foodx_devops_tools. # # You should have received a", "copy of the MIT License along with # foodx_devops_tools. If", "of the MIT License along with # foodx_devops_tools. If not,", "Technologies # # This file is part of foodx_devops_tools. #", "# Copyright (c) 2021 Food-X Technologies # # This file", "the MIT License along with # foodx_devops_tools. If not, see", "# You should have received a copy of the MIT", "file is part of foodx_devops_tools. # # You should have", "(c) 2021 Food-X Technologies # # This file is part", "# This file is part of foodx_devops_tools. # # You", "2021 Food-X Technologies # # This file is part of", "Food-X Technologies # # This file is part of foodx_devops_tools.", "License along with # foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>. \"\"\"Azure", "# # You should have received a copy of the", "received a copy of the MIT License along with #", "is part of foodx_devops_tools. # # You should have received", "with # foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>. \"\"\"Azure related utilities.\"\"\"", "You should have received a copy of the MIT License" ]
[ "to be validated with :pep:`593`-compliant :attr:`typing.Annotated` type hints. #. Subscript", "Email. # * Uuid. # * Choice. # * Language.", "the above example, this would be: # class NonEmptyStringMetaclass(object): #", "annotation objects defined by the :mod:`typing` module, these classes are", "# * Country. # * Currency. #* Comparison constraints #", "whenever subscripted. Clearly, this gets #expensive in both space and", "Delete all private factory classes imported above for safety. del", "Each __class_getitem__() dunder method of each \"_BeartypeValidatorFactoryABC\" subclass will need", "above for safety. del ( _IsFactory, _IsAttrFactory, _IsEqualFactory, _IsInstanceFactory, _IsSubclassFactory,", "and third-party objects. Like annotation objects defined by the :mod:`typing`", "with :pep:`593`-compliant :attr:`typing.Annotated` type hints. #. Subscript those hints with", "# def __isinstancecheck__(cls, obj) -> bool: # return isinstance(obj, str)", "= _IsFactory(basename='Is') IsAttr = _IsAttrFactory(basename='IsAttr') IsEqual = _IsEqualFactory(basename='IsEqual') IsInstance =", "in the same way: e.g., # from beartype import beartype", "the above example, this # would be: # class NonEmptyStringClass(object,", "would then be implicitly supported by #any runtime type checker.", "(indexed) and are thus intended to annotate callables and variables.", "itself is trivial. We declare a new #\"beartype.is.Portable\" singleton accessed", "different #checkers... except not really, since they'd still have to", "# * IdenticalTo. # * NotIdenticalTo. # * LessThan. #", "So, this is probably actually useless. # #Nonetheless, the idea", "by @Saphyel at #32, add support for #additional classes support", "safety, this new class does *NOT* subclass the #first subscripted", "class does *NOT* subclass the #first subscripted class. Instead: #*", "import beartype # from beartype.is import Portable # NonEmptyStringTest =", "str) and NonEmptyStringTest(obj) #* This new class would then be", "Beartype authors. # See \"LICENSE\" for further details. ''' **Beartype", "# ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To avoid polluting", "and pass that callable when creating and returning # its", "subscriptions of \"Is\" (e.g., #\"Annotated[str, Is[lambda text: bool(text)]]\") are only", "\"beartype.is\" infrastructure to dynamically #synthesize PEP-compliant type hints that would", "would then be entirely empty. For the above example, this", "thus intended to annotate callables and variables. Unlike annotation objects", "be validated with :pep:`593`-compliant :attr:`typing.Annotated` type hints. #. Subscript those", "Instead: #* This new metaclass of this new class simply", "a new # mandatory \"get_cause_or_none: Callable[[], Optional[str]]\" parameter, which #", "an __isinstancecheck__() # dunder method. For the above example, this", "python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype", "a new BeartypeValidator.get_cause_or_none() method with the same #signature and docstring", "singleton accessed in the same way: e.g., # from beartype", "Language. # * Locale. # * Country. # * Currency.", "bool: # return isinstance(obj, str) and NonEmptyStringTest(obj) #* This new", "to annotate callables and variables. 
Unlike annotation objects defined by", "argparse import ArgumentParser as _ArgumentParser\" rather # than merely \"from", "from beartype.vale._is._valeistype import ( _IsInstanceFactory, _IsSubclassFactory, ) from beartype.vale._is._valeisobj import", "# than merely \"from argparse import ArgumentParser\"). #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.vale._is._valeis", "# #* String constraints: # * Email. # * Uuid.", "\"BeartypeValidator\" instance. #FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is", "# * Range. # * DivisibleBy. #FIXME: Add a new", "all private factory classes imported above for safety. del (", "add support for #additional classes support constraints resembling: # #*", "need to additionally accept a new # mandatory \"get_cause_or_none: Callable[[],", "trivial. We declare a new #\"beartype.is.Portable\" singleton accessed in the", "really, since they'd still have to import beartype #infrastructure to", "def munge_it(text: NonEmptyString) -> str: ... # #So what's the", "for #additional classes support constraints resembling: # #* String constraints:", "be: # class NonEmptyStringMetaclass(object): # def __isinstancecheck__(cls, obj) -> bool:", "enabling callers to validate the internal structure of arbitrarily complex", "# Public factory singletons instantiating these private factory classes. Is", "#technically improve portability by allowing users to switch between different", "BeartypeValidator.__init__() method will need to additionally accept a new #", "new # mandatory \"get_cause_or_none: Callable[[], Optional[str]]\" parameter, which # that", "defines an __isinstancecheck__() # dunder method. For the above example,", "of arbitrarily complex scalars, data structures, and third-party objects. Like", "that method should then localize to \"self.get_cause_or_none\". #* Each __class_getitem__()", "* GreaterThan. # * Range. # * DivisibleBy. #FIXME: Add", "Range. # * DivisibleBy. #FIXME: Add a new BeartypeValidator.get_cause_or_none() method", "requested by @Saphyel at #32, add support for #additional classes", "intelligently requested by @Saphyel at #32, add support for #additional", "import _IsAttrFactory from beartype.vale._is._valeisoper import _IsEqualFactory # ....................{ SINGLETONS }....................", "generate PEP-compliant type hints when subscripted (indexed) and are thus", "#signature and docstring as the existing CauseSleuth.get_cause_or_none() #method. This new", "does *NOT* subclass the #first subscripted class. Instead: #* This", "@Saphyel at #32, add support for #additional classes support constraints", "this would be: # class NonEmptyStringMetaclass(object): # def __isinstancecheck__(cls, obj)", "subclass the #first subscripted class. Instead: #* This new metaclass", "why this won't #be the default approach. For safety, this", "#called by the \"_peperrorannotated\" submodule to generate human-readable #exception messages.", "# WARNING: To avoid polluting the public module namespace, external", ":pep:`593`-compliant :attr:`typing.Annotated` type hints. #. Subscript those hints with (in", "to validate the internal structure of arbitrarily complex scalars, data", "module, these classes are *not* explicitly covered by existing PEPs", "beartype. Right? That said, this would #technically improve portability by", "messages. Note that this implies that: #* The BeartypeValidator.__init__() method", "-> str: ... 
# #So what's the difference between \"typing.Annotated\"", "consumption fast -- which is why this won't #be the", "factory classes imported above for safety. del ( _IsFactory, _IsAttrFactory,", "thus *not* directly usable as annotations. Instead, callers are expected", "course, does anyone care? I mean, if you're using a", "have to import beartype #infrastructure to do so. So, this", "private factory classes. Is = _IsFactory(basename='Is') IsAttr = _IsAttrFactory(basename='IsAttr') IsEqual", "Is[lambda text: bool(text)] # NonEmptyString = Portable[str, NonEmptyStringTest] # @beartype", "beartype #infrastructure to do so. So, this is probably actually", "\"_BeartypeValidatorFactoryABC\" subclass will need # to additionally define and pass", "#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we", "type checker. At present, subscriptions of \"Is\" (e.g., #\"Annotated[str, Is[lambda", "Simple. The latter dynamically generates one new PEP 3119-compliant #metaclass", "difference between \"typing.Annotated\" and \"beartype.is.Portable\" #then? Simple. The latter dynamically", "#checkers... except not really, since they'd still have to import", "}.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To avoid polluting the public module", "to dynamically #synthesize PEP-compliant type hints that would then be", "__isinstancecheck__(cls, obj) -> bool: # return isinstance(obj, str) and NonEmptyStringTest(obj)", "pass that callable when creating and returning # its \"BeartypeValidator\"", "\"self.get_cause_or_none\". #* Each __class_getitem__() dunder method of each \"_BeartypeValidatorFactoryABC\" subclass", "the existing CauseSleuth.get_cause_or_none() #method. This new BeartypeValidator.get_cause_or_none() method should then", "new BeartypeValidator.get_cause_or_none() method should then be #called by the \"_peperrorannotated\"", "ArgumentParser as _ArgumentParser\" rather # than merely \"from argparse import", "module (e.g., :attr:`typing.Union`), these classes dynamically generate PEP-compliant type hints", "objects defined by the :mod:`typing` module, these classes are *not*", "accept a new # mandatory \"get_cause_or_none: Callable[[], Optional[str]]\" parameter, which", "this implies that: #* The BeartypeValidator.__init__() method will need to", "defined by the :mod:`typing` module, these classes are *not* explicitly", "would be: # class NonEmptyStringMetaclass(object): # def __isinstancecheck__(cls, obj) ->", "public module namespace, external attributes # should be locally imported", "fast -- which is why this won't #be the default", "alternate private # names (e.g., \"from argparse import ArgumentParser as", "entirely empty. For the above example, this # would be:", "( _IsFactory, _IsAttrFactory, _IsEqualFactory, _IsInstanceFactory, _IsSubclassFactory, ) # ....................{ TODO", "or more subscriptions of classes declared by this submodule. '''", "both space and time consumption fast -- which is why", "_IsEqualFactory # ....................{ SINGLETONS }.................... # Public factory singletons instantiating", "Uuid. # * Choice. # * Language. # * Locale.", "PEP 3119-compliant #metaclass and associated class whenever subscripted. 
Clearly, this", "private # names (e.g., \"from argparse import ArgumentParser as _ArgumentParser\"", "That said, this would #technically improve portability by allowing users", "under alternate private # names (e.g., \"from argparse import ArgumentParser", "# NonEmptyString = Portable[str, NonEmptyStringTest] # @beartype # def munge_it(text:", "#first subscripted class. Instead: #* This new metaclass of this", "# * NotIdenticalTo. # * LessThan. # * GreaterThan. #", "PEP-compliant type hints that would then be implicitly supported by", "= Portable[str, NonEmptyStringTest] # @beartype # def munge_it(text: NonEmptyString) ->", "be: # class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass): # pass # #Well, so", "be locally imported at module scope *ONLY* under alternate private", "method. For the above example, this would be: # class", "The type of those parameters and returns. #. One or", "instance. #FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that", "\"_peperrorannotated\" submodule to generate human-readable #exception messages. Note that this", "__class_getitem__() dunder method of each \"_BeartypeValidatorFactoryABC\" subclass will need #", "it seems doubtful anyone #would actually do that. Nonetheless, that's", "these classes are *not* explicitly covered by existing PEPs and", "imported at module scope *ONLY* under alternate private # names", "supported by beartype #itself. Of course, does anyone care? I", "will need # to additionally define and pass that callable", "#. Annotate callable parameters and returns to be validated with", "# names (e.g., \"from argparse import ArgumentParser as _ArgumentParser\" rather", "DivisibleBy. #FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same", "infrastructure to dynamically #synthesize PEP-compliant type hints that would then", "# from beartype.is import Portable # NonEmptyStringTest = Is[lambda text:", "BeartypeValidator.get_cause_or_none() method should then be #called by the \"_peperrorannotated\" submodule", "str: ... # #So what's the difference between \"typing.Annotated\" and", "Holyshitballstime. The idea here is that we can #leverage all", "for safety. del ( _IsFactory, _IsAttrFactory, _IsEqualFactory, _IsInstanceFactory, _IsSubclassFactory, )", "# * DivisibleBy. #FIXME: Add a new BeartypeValidator.get_cause_or_none() method with", "are *not* explicitly covered by existing PEPs and thus *not*", "....................{ TODO }.................... #FIXME: As intelligently requested by @Saphyel at", "''' **Beartype validators.** This submodule publishes a PEP-compliant hierarchy of", "are only supported by beartype #itself. Of course, does anyone", "IsEqual = _IsEqualFactory(basename='IsEqual') IsInstance = _IsInstanceFactory(basename='IsInstance') IsSubclass = _IsSubclassFactory(basename='IsSubclass') #", "from beartype import beartype # from beartype.is import Portable #", "class would then be entirely empty. For the above example,", "new class simply defines an __isinstancecheck__() # dunder method. For", "(in order): #. The type of those parameters and returns.", "example, this would be: # class NonEmptyStringMetaclass(object): # def __isinstancecheck__(cls,", "PEP-compliant type hints when subscripted (indexed) and are thus intended", "# ....................{ SINGLETONS }.................... # Public factory singletons instantiating these", "said, this would #technically improve portability by allowing users to", "directly usable as annotations. 
Instead, callers are expected to (in", "using a runtime type #checker, you're probably *ONLY* using beartype.", ") # ....................{ TODO }.................... #FIXME: As intelligently requested by", "constraints # * IdenticalTo. # * NotIdenticalTo. # * LessThan.", "#any runtime type checker. At present, subscriptions of \"Is\" (e.g.,", "approach. For safety, this new class does *NOT* subclass the", "class. Instead: #* This new metaclass of this new class", "# pass # #Well, so much for brilliant. It's slow", "One or more subscriptions of classes declared by this submodule.", "Unlike annotation objects defined by the :mod:`typing` module, these classes", "* Locale. # * Country. # * Currency. #* Comparison", "scalars, data structures, and third-party objects. Like annotation objects defined", "_IsAttrFactory(basename='IsAttr') IsEqual = _IsEqualFactory(basename='IsEqual') IsInstance = _IsInstanceFactory(basename='IsInstance') IsSubclass = _IsSubclassFactory(basename='IsSubclass')", "explicitly covered by existing PEPs and thus *not* directly usable", "To avoid polluting the public module namespace, external attributes #", "at module scope *ONLY* under alternate private # names (e.g.,", "at #32, add support for #additional classes support constraints resembling:", "of this new class simply defines an __isinstancecheck__() # dunder", "intended to annotate callables and variables. Unlike annotation objects defined", "submodule to generate human-readable #exception messages. Note that this implies", "localize to \"self.get_cause_or_none\". #* Each __class_getitem__() dunder method of each", "= _IsInstanceFactory(basename='IsInstance') IsSubclass = _IsSubclassFactory(basename='IsSubclass') # Delete all private factory", "IdenticalTo. # * NotIdenticalTo. # * LessThan. # * GreaterThan.", "is probably actually useless. # #Nonetheless, the idea itself is", "This submodule publishes a PEP-compliant hierarchy of subscriptable (indexable) classes", "to \"self.get_cause_or_none\". #* Each __class_getitem__() dunder method of each \"_BeartypeValidatorFactoryABC\"", "= _IsEqualFactory(basename='IsEqual') IsInstance = _IsInstanceFactory(basename='IsInstance') IsSubclass = _IsSubclassFactory(basename='IsSubclass') # Delete", "hints. #. Subscript those hints with (in order): #. The", "defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these classes dynamically", "CauseSleuth.get_cause_or_none() #method. This new BeartypeValidator.get_cause_or_none() method should then be #called", "This new metaclass of this new class simply defines an", "annotate callables and variables. Unlike annotation objects defined by the", "import _IsEqualFactory # ....................{ SINGLETONS }.................... # Public factory singletons", "# def munge_it(text: NonEmptyString) -> str: ... # #So what's", "to additionally accept a new # mandatory \"get_cause_or_none: Callable[[], Optional[str]]\"", "empty. For the above example, this # would be: #", "existing PEPs and thus *not* directly usable as annotations. Instead,", "in both space and time consumption fast -- which is", "by #any runtime type checker. At present, subscriptions of \"Is\"", "won't #be the default approach. For safety, this new class", "further details. 
''' **Beartype validators.** This submodule publishes a PEP-compliant", "when subscripted (indexed) and are thus intended to annotate callables", "this new class simply defines an __isinstancecheck__() # dunder method.", "annotation objects defined by the :mod:`typing` module (e.g., :attr:`typing.Union`), these", "bool(text)]]\") are only supported by beartype #itself. Of course, does", "# * Language. # * Locale. # * Country. #", "one new PEP 3119-compliant #metaclass and associated class whenever subscripted.", "#* Comparison constraints # * IdenticalTo. # * NotIdenticalTo. #", "(e.g., :attr:`typing.Union`), these classes dynamically generate PEP-compliant type hints when", "#FIXME: As intelligently requested by @Saphyel at #32, add support", "\"from argparse import ArgumentParser as _ArgumentParser\" rather # than merely", "beartype.is import Portable # NonEmptyStringTest = Is[lambda text: bool(text)] #", "subclass will need # to additionally define and pass that", "PEPs and thus *not* directly usable as annotations. Instead, callers", "and returns to be validated with :pep:`593`-compliant :attr:`typing.Annotated` type hints.", "# that method should then localize to \"self.get_cause_or_none\". #* Each", "mean, if you're using a runtime type #checker, you're probably", "that this implies that: #* The BeartypeValidator.__init__() method will need", "\"LICENSE\" for further details. ''' **Beartype validators.** This submodule publishes", "classes dynamically generate PEP-compliant type hints when subscripted (indexed) and", "are thus intended to annotate callables and variables. Unlike annotation", "safety. del ( _IsFactory, _IsAttrFactory, _IsEqualFactory, _IsInstanceFactory, _IsSubclassFactory, ) #", "# * GreaterThan. # * Range. # * DivisibleBy. #FIXME:", "existing \"beartype.is\" infrastructure to dynamically #synthesize PEP-compliant type hints that", "authors. # See \"LICENSE\" for further details. ''' **Beartype validators.**", "can #leverage all of our existing \"beartype.is\" infrastructure to dynamically", "support for #additional classes support constraints resembling: # #* String", "# return isinstance(obj, str) and NonEmptyStringTest(obj) #* This new class", "should then localize to \"self.get_cause_or_none\". #* Each __class_getitem__() dunder method", "The idea here is that we can #leverage all of", "module scope *ONLY* under alternate private # names (e.g., \"from", "and big, so it seems doubtful anyone #would actually do", "# --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors.", "names (e.g., \"from argparse import ArgumentParser as _ArgumentParser\" rather #", "the internal structure of arbitrarily complex scalars, data structures, and", "publishes a PEP-compliant hierarchy of subscriptable (indexable) classes enabling callers", "#. The type of those parameters and returns. #. One", "from beartype.vale._is._valeisobj import _IsAttrFactory from beartype.vale._is._valeisoper import _IsEqualFactory # ....................{", "}.................... # Public factory singletons instantiating these private factory classes.", "( _IsInstanceFactory, _IsSubclassFactory, ) from beartype.vale._is._valeisobj import _IsAttrFactory from beartype.vale._is._valeisoper", "Is[lambda text: bool(text)]]\") are only supported by beartype #itself. Of", "#Nonetheless, the idea itself is trivial. We declare a new", "# NonEmptyStringTest = Is[lambda text: bool(text)] # NonEmptyString = Portable[str,", "class whenever subscripted. 
# ....................{ SINGLETONS }....................
# Public factory singletons instantiating these private factory classes.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')

# Delete all private factory classes imported above for safety.
del (
    _IsFactory,
    _IsAttrFactory,
    _IsEqualFactory,
    _IsInstanceFactory,
    _IsSubclassFactory,
)
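# A further sketch composing the other public factories published above
# ("IsAttr", "IsEqual", "IsInstance"). The "&" conjunction between validators
# follows the documented beartype.vale API and is an assumption of this
# sketch rather than anything defined in this submodule:
#    from typing import Annotated
#    from beartype.vale import IsAttr, IsEqual, IsInstance
#
#    class Point:
#        def __init__(self, x: int, y: int) -> None:
#            self.x = x
#            self.y = y
#
#    # Hint matching only "Point" instances whose "x" attribute equals 0.
#    YAxisPoint = Annotated[Point, IsInstance[Point] & IsAttr['x', IsEqual[0]]]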
# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes supporting constraints resembling:
#
#* String constraints:
#  * Email.
#  * Uuid.
#  * Choice.
#  * Language.
#  * Locale.
#  * Country.
#  * Currency.
#* Comparison constraints:
#  * IdenticalTo.
#  * NotIdenticalTo.
#  * LessThan.
#  * GreaterThan.
#  * Range.
#  * DivisibleBy.
#(Several of these can already be approximated with "Is"; see the sketch
#below.)
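# A hedged illustration (not part of this submodule's API): until dedicated
# factories such as "Range" or "DivisibleBy" exist, comparable constraints can
# already be approximated with the "Is" factory published above, e.g.:
#    from typing import Annotated
#    from beartype.vale import Is
#
#    # Hint matching only integers evenly divisible by 3.
#    DivisibleByThree = Annotated[int, Is[lambda number: number % 3 == 0]]
#
#    # Hint matching only integers in the inclusive range [0, 100].
#    Percentile = Annotated[int, Is[lambda number: 0 <= number <= 100]]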
#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a
#  new mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter,
#  which that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each "_BeartypeValidatorFactoryABC"
#  subclass will need to additionally define and pass that callable when
#  creating and returning its "BeartypeValidator" instance.
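# A purely hypothetical sketch of the __init__() change proposed above; the
# parameter name "is_valid" and the class body are illustrative assumptions,
# not the actual implementation:
#    from typing import Callable, Optional
#
#    class BeartypeValidator(object):
#        def __init__(
#            self,
#            is_valid: Callable[[object], bool],
#            get_cause_or_none: Callable[[], Optional[str]],
#        ) -> None:
#            self.is_valid = is_valid
#            # Localize the new mandatory parameter, as described above.
#            self.get_cause_or_none = get_cause_or_none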
#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported by
#any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
#    from beartype import beartype
#    from beartype.is import Portable
#    NonEmptyStringTest = Is[lambda text: bool(text)]
#    NonEmptyString = Portable[str, NonEmptyStringTest]
#    @beartype
#    def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* The new metaclass of this new class simply defines an __instancecheck__()
#  dunder method (the PEP 3119 hook that isinstance() delegates to). For the
#  above example, this would be:
#    class NonEmptyStringMetaclass(type):
#        def __instancecheck__(cls, obj) -> bool:
#            return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
#  would be:
#    class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
#        pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.