ngram
listlengths
0
67.8k
[ "<gh_stars>1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- import pkg_resources __version__ = pkg_resources.get_distribution('g-pypi').version.replace('dev',", "#!/usr/bin/env python # -*- coding: utf-8 -*- import pkg_resources __version__ = pkg_resources.get_distribution('g-pypi').version.replace('dev', '')" ]
[ "scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale = scale.reshape(1, -1) return [offset, scale]", "X[0] / 2 offY[i] = X[1] / (2 * X[3]) offZ[i] = X[2]", "* offY[i]**2 + X[4] * offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i] =", "= np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:, 1] ** 2, -", "X[3]) offZ[i] = X[2] / (2 * X[4]) temp = X[5] + offX[i]", "= X[2] / (2 * X[4]) temp = X[5] + offX[i] ** 2", "2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp /", "offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ", "np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in", "3:i * 3 + 3] H = np.array([mag[:, 0], mag[:, 1], mag[:, 2],", "2 offY[i] = X[1] / (2 * X[3]) offZ[i] = X[2] / (2", "data = data[cut: -cut] nsensor = int(data.shape[1] / 3) offX = np.zeros(nsensor) offY", "- mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w = mag[:, 0] ** 2", "mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)),", "# cut = int(data.shape[0]/10) # data = data[cut: -cut] nsensor = int(data.shape[1] /", "offset.reshape(1, -1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale = scale.reshape(1, -1) return", "/ X[4]) offset = np.stack([offX, offY, offZ], axis=0).T offset = offset.reshape(1, -1) scale", "np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor)", "H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] = X[0] /", "-1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale = scale.reshape(1, -1) return [offset,", "- mag[:, 1] ** 2, - mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w", "def calibrate(path): data = read_data(path) # cut = int(data.shape[0]/10) # data = data[cut:", "data = read_data(path) # cut = int(data.shape[0]/10) # data = 
data[cut: -cut] nsensor", "X[5] + offX[i] ** 2 + X[3] * offY[i]**2 + X[4] * offZ[i]", "np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset = np.stack([offX, offY, offZ],", "X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset = np.stack([offX, offY, offZ], axis=0).T offset", "= np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY =", "# data = data[cut: -cut] nsensor = int(data.shape[1] / 3) offX = np.zeros(nsensor)", "0])]).T w = mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X", "<reponame>dychen24/magx import numpy as np from .data_reader import read_data def calibrate(path): data =", "mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w = mag[:, 0] ** 2 tmp", "scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp / X[4])", "X[4] * offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3])", "scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in range(nsensor):", "= mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T,", "= data[:, i * 3:i * 3 + 3] H = np.array([mag[:, 0],", "* X[3]) offZ[i] = X[2] / (2 * X[4]) temp = X[5] +", "3] H = np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:, 1] **", "** 2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp", "/ (2 * X[4]) temp = X[5] + offX[i] ** 2 + X[3]", "= X[1] / (2 * X[3]) offZ[i] = X[2] / (2 * X[4])", "* 3 + 3] H = np.array([mag[:, 0], mag[:, 1], mag[:, 2], -", "read_data(path) # cut = int(data.shape[0]/10) # data = data[cut: -cut] nsensor = int(data.shape[1]", "nsensor = int(data.shape[1] / 3) offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ =", "= np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] = X[0] / 2 offY[i] =", "offZ], axis=0).T offset = offset.reshape(1, -1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale", "1] ** 2, - 
mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w = mag[:,", "2, np.ones_like(mag[:, 0])]).T w = mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)),", "np.zeros(nsensor) for i in range(nsensor): mag = data[:, i * 3:i * 3", "= X[0] / 2 offY[i] = X[1] / (2 * X[3]) offZ[i] =", "from .data_reader import read_data def calibrate(path): data = read_data(path) # cut = int(data.shape[0]/10)", "X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] = X[0] / 2 offY[i]", "= data[cut: -cut] nsensor = int(data.shape[1] / 3) offX = np.zeros(nsensor) offY =", "scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in range(nsensor): mag = data[:,", "2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape)", "/ (2 * X[3]) offZ[i] = X[2] / (2 * X[4]) temp =", "3 + 3] H = np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:,", "np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] = X[0] / 2 offY[i] = X[1]", "= np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset = np.stack([offX, offY,", "np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset =", "+ 3] H = np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:, 1]", "np.sqrt(temp / X[4]) offset = np.stack([offX, offY, offZ], axis=0).T offset = offset.reshape(1, -1)", "scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset = np.stack([offX,", "X[1] / (2 * X[3]) offZ[i] = X[2] / (2 * X[4]) temp", "= np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset", "i in range(nsensor): mag = data[:, i * 3:i * 3 + 3]", "(2 * X[4]) temp = X[5] + offX[i] ** 2 + X[3] *", "= np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ =", "= read_data(path) # cut = int(data.shape[0]/10) # data = data[cut: -cut] nsensor =", "offset = np.stack([offX, offY, offZ], axis=0).T offset 
= offset.reshape(1, -1) scale = np.stack([scaleX,", ".data_reader import read_data def calibrate(path): data = read_data(path) # cut = int(data.shape[0]/10) #", "H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] = X[0] / 2", "offX[i] ** 2 + X[3] * offY[i]**2 + X[4] * offZ[i] ** 2", "offY[i]**2 + X[4] * offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp", "(2 * X[3]) offZ[i] = X[2] / (2 * X[4]) temp = X[5]", "/ X[3]) scaleZ[i] = np.sqrt(temp / X[4]) offset = np.stack([offX, offY, offZ], axis=0).T", "cut = int(data.shape[0]/10) # data = data[cut: -cut] nsensor = int(data.shape[1] / 3)", "data[:, i * 3:i * 3 + 3] H = np.array([mag[:, 0], mag[:,", "** 2, - mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w = mag[:, 0]", "as np from .data_reader import read_data def calibrate(path): data = read_data(path) # cut", "offset = offset.reshape(1, -1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale = scale.reshape(1,", "data[cut: -cut] nsensor = int(data.shape[1] / 3) offX = np.zeros(nsensor) offY = np.zeros(nsensor)", "2], - mag[:, 1] ** 2, - mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T", "offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for", "np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:, 1] ** 2, - mag[:,", "mag[:, 2], - mag[:, 1] ** 2, - mag[:, 2] ** 2, np.ones_like(mag[:,", "# print(X.shape) offX[i] = X[0] / 2 offY[i] = X[1] / (2 *", "import numpy as np from .data_reader import read_data def calibrate(path): data = read_data(path)", "* X[4]) temp = X[5] + offX[i] ** 2 + X[3] * offY[i]**2", "= offset.reshape(1, -1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale = scale.reshape(1, -1)", "range(nsensor): mag = data[:, i * 3:i * 3 + 3] H =", "= np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in range(nsensor): mag", "offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i] =", "= 
int(data.shape[1] / 3) offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor)", "H)), H.T).dot(w) # print(X.shape) offX[i] = X[0] / 2 offY[i] = X[1] /", "np from .data_reader import read_data def calibrate(path): data = read_data(path) # cut =", "/ 3) offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX =", "= np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in range(nsensor): mag = data[:, i", "np.ones_like(mag[:, 0])]).T w = mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T)", "2 + X[3] * offY[i]**2 + X[4] * offZ[i] ** 2 scaleX[i] =", "X[4]) temp = X[5] + offX[i] ** 2 + X[3] * offY[i]**2 +", "mag = data[:, i * 3:i * 3 + 3] H = np.array([mag[:,", "numpy as np from .data_reader import read_data def calibrate(path): data = read_data(path) #", "0], mag[:, 1], mag[:, 2], - mag[:, 1] ** 2, - mag[:, 2]", "** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) #", "np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] = X[0]", "+ X[4] * offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp /", "1], mag[:, 2], - mag[:, 1] ** 2, - mag[:, 2] ** 2,", "mag[:, 1], mag[:, 2], - mag[:, 1] ** 2, - mag[:, 2] **", "* offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i] = np.sqrt(temp / X[3]) scaleZ[i]", "X[3] * offY[i]**2 + X[4] * offZ[i] ** 2 scaleX[i] = np.sqrt(temp) scaleY[i]", "X[4]) offset = np.stack([offX, offY, offZ], axis=0).T offset = offset.reshape(1, -1) scale =", "** 2 + X[3] * offY[i]**2 + X[4] * offZ[i] ** 2 scaleX[i]", "int(data.shape[1] / 3) offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX", "2] ** 2, np.ones_like(mag[:, 0])]).T w = mag[:, 0] ** 2 tmp =", "scaleZ[i] = np.sqrt(temp / X[4]) offset = np.stack([offX, offY, offZ], axis=0).T offset =", "np.stack([offX, offY, offZ], axis=0).T offset = offset.reshape(1, -1) 
scale = np.stack([scaleX, scaleY, scaleZ],", "import read_data def calibrate(path): data = read_data(path) # cut = int(data.shape[0]/10) # data", "= np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i] =", "H.T).dot(w) # print(X.shape) offX[i] = X[0] / 2 offY[i] = X[1] / (2", "tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w) # print(X.shape) offX[i]", "= np.zeros(nsensor) for i in range(nsensor): mag = data[:, i * 3:i *", "offZ[i] = X[2] / (2 * X[4]) temp = X[5] + offX[i] **", "= np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i", "offY[i] = X[1] / (2 * X[3]) offZ[i] = X[2] / (2 *", "temp = X[5] + offX[i] ** 2 + X[3] * offY[i]**2 + X[4]", "0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T).dot(w)", "int(data.shape[0]/10) # data = data[cut: -cut] nsensor = int(data.shape[1] / 3) offX =", "= X[5] + offX[i] ** 2 + X[3] * offY[i]**2 + X[4] *", "print(X.shape) offX[i] = X[0] / 2 offY[i] = X[1] / (2 * X[3])", "axis=0).T offset = offset.reshape(1, -1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T scale =", "i * 3:i * 3 + 3] H = np.array([mag[:, 0], mag[:, 1],", "2, - mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w = mag[:, 0] **", "H = np.array([mag[:, 0], mag[:, 1], mag[:, 2], - mag[:, 1] ** 2,", "3) offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor)", "np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in range(nsensor): mag = data[:, i *", "= np.sqrt(temp / X[4]) offset = np.stack([offX, offY, offZ], axis=0).T offset = offset.reshape(1,", "/ 2 offY[i] = X[1] / (2 * X[3]) offZ[i] = X[2] /", "calibrate(path): data = read_data(path) # cut = int(data.shape[0]/10) # data = data[cut: -cut]", "np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = 
np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor)", "read_data def calibrate(path): data = read_data(path) # cut = int(data.shape[0]/10) # data =", "+ offX[i] ** 2 + X[3] * offY[i]**2 + X[4] * offZ[i] **", "-cut] nsensor = int(data.shape[1] / 3) offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ", "np.zeros(nsensor) scaleY = np.zeros(nsensor) scaleZ = np.zeros(nsensor) for i in range(nsensor): mag =", "offX[i] = X[0] / 2 offY[i] = X[1] / (2 * X[3]) offZ[i]", "* 3:i * 3 + 3] H = np.array([mag[:, 0], mag[:, 1], mag[:,", "+ X[3] * offY[i]**2 + X[4] * offZ[i] ** 2 scaleX[i] = np.sqrt(temp)", "scaleZ = np.zeros(nsensor) for i in range(nsensor): mag = data[:, i * 3:i", "X[2] / (2 * X[4]) temp = X[5] + offX[i] ** 2 +", "w = mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T, H)), H.T) X =", "** 2, np.ones_like(mag[:, 0])]).T w = mag[:, 0] ** 2 tmp = np.matmul(np.linalg.inv(np.matmul(H.T,", "offY, offZ], axis=0).T offset = offset.reshape(1, -1) scale = np.stack([scaleX, scaleY, scaleZ], axis=0).T", "for i in range(nsensor): mag = data[:, i * 3:i * 3 +", "offX = np.zeros(nsensor) offY = np.zeros(nsensor) offZ = np.zeros(nsensor) scaleX = np.zeros(nsensor) scaleY", "= np.stack([offX, offY, offZ], axis=0).T offset = offset.reshape(1, -1) scale = np.stack([scaleX, scaleY,", "in range(nsensor): mag = data[:, i * 3:i * 3 + 3] H", "mag[:, 1] ** 2, - mag[:, 2] ** 2, np.ones_like(mag[:, 0])]).T w =", "= int(data.shape[0]/10) # data = data[cut: -cut] nsensor = int(data.shape[1] / 3) offX" ]
[]
[ "from setuptools import setup, find_packages long_description = \"\"\"tcapy is a transaction cost analysis", "transaction cost analysis library for determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction", "determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>',", "find_packages long_description = \"\"\"tcapy is a transaction cost analysis library for determining calculating", "cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost analysis'],", "library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost analysis'], url='https://github.com/cuemacro/tcapy', packages=find_packages(),", "for determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>',", "import setup, find_packages long_description = \"\"\"tcapy is a transaction cost analysis library for", "analysis library for determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis", "author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost analysis'], url='https://github.com/cuemacro/tcapy', packages=find_packages(), include_package_data=True,", "analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost analysis'], url='https://github.com/cuemacro/tcapy',", "license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost analysis'], 
url='https://github.com/cuemacro/tcapy', packages=find_packages(), include_package_data=True, install_requires=[], zip_safe=False)", "costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas',", "is a transaction cost analysis library for determining calculating your trading costs\"\"\" setup(name='tcapy',", "calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache", "setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA',", "setuptools import setup, find_packages long_description = \"\"\"tcapy is a transaction cost analysis library", "a transaction cost analysis library for determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0',", "= \"\"\"tcapy is a transaction cost analysis library for determining calculating your trading", "cost analysis library for determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost", "\"\"\"tcapy is a transaction cost analysis library for determining calculating your trading costs\"\"\"", "version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction", "your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0',", "setup, find_packages long_description = \"\"\"tcapy is a transaction cost analysis library for determining", "<filename>setup.py from setuptools import setup, 
find_packages long_description = \"\"\"tcapy is a transaction cost", "trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description,", "author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost analysis'], url='https://github.com/cuemacro/tcapy', packages=find_packages(), include_package_data=True, install_requires=[],", "library for determining calculating your trading costs\"\"\" setup(name='tcapy', version='0.1.0', description='Tranasction cost analysis library',", "long_description = \"\"\"tcapy is a transaction cost analysis library for determining calculating your", "description='Tranasction cost analysis library', author='<NAME>', author_email='<EMAIL>', license='Apache 2.0', long_description=long_description, keywords=['pandas', 'TCA', 'transaction cost" ]
[ "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report):", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it is to use a", ":param project_report: The project report containing the data to base the estimation on", "Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how", "projects = report['projects'] try: for project in projects: project_report = projects[project] eou_score =", "= 1 self.high_low = 6 self.high = 5 self.low = 1 class EaseOfUseEstimation(ReportTask):", "\"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low'] class Weight(object):", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "project report containing the data to base the estimation on :returns: The ease", "containing the data to base the estimation on :returns: The ease of use", "project_report: The project report to gather the data from \"\"\" required = {'generalData':", "passed to all tasks. 
They _might_ contain something that is useful for the", "the ease of use calculation is missing - \" \"generalData.documentation.exists.*\" ) exists =", "= 5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy", "this software and associated documentation files (the \"Software\"), to deal in the Software", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "false interfaceLevel: high: true low: true Example: EaseOfUseEstimation: difficult :param parameter: Parameter given", "lower :param upper: The upper \"\"\" def __init__(self, name, lower, upper): self.name =", "exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme'] self.website = exists['website'] def", "use score \"\"\" data = ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples +", "self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it is to", "None self.high = None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report):", "This is based on the following data: manual: generalData: documentation: exists: readme: false", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "estimates how easy it is to use a project. 
:param report: The report", "self.explanations = None self.high = None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def", "the Software without restriction, including without limitation the rights to use, copy, modify,", "person obtaining a copy of this software and associated documentation files (the \"Software\"),", "the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "= None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the documentation existence", "License (MIT) Copyright (c) 2017 <NAME> Permission is hereby granted, free of charge,", "None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the documentation existence data", "\"\"\" Estimates how easy it is to use a project. 
The basic formula", "missing - \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme =", "for the project '{}' with \" \"the report\\n{}\".format( project, projects[project]) ) from e", "without restriction, including without limitation the rights to use, copy, modify, merge, publish,", "merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit", "score *= self.__weight.readme return score def scrab(self, report): \"\"\" The scrab task estimates", "score += self.__weight.high_low elif data.high: score += self.__weight.high elif data.low: score += self.__weight.low", "\"\"\" required = {'generalData': {'documentation': { 'completeness': {'apis': False, 'examples': False, 'explanations': False}}}}", "of use calculation is missing - \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download", "self.website = 3 self.download = 2 self.readme = 1 self.apis = 3 self.examples", "The scrab task estimates how easy it is to use a project. :param", "to gather the data from \"\"\" def __init__(self, project_report): self.website = None self.download", "ease of use for the project '{}' with \" \"the report\\n{}\".format( project, projects[project])", "= lower self.upper = upper class ScoreData(): \"\"\" Convenience class that gathers the", "for the ease of use calculation is missing - \" \"generalData.documentation.exists.*\" ) exists", "False, 'examples': False, 'explanations': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for", "= comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the projects interface", "should happen in the argHandler. 
\"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version,", "class Weight(object): \"\"\" Convenience class that holds all weights needed for the ease", "score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and data.low:", "in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED", "Class to estimate how easy it is to use a project. This is", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" from ..scrabTask", "Weight(object): \"\"\" Convenience class that holds all weights needed for the ease of", "sublicense, and/or sell copies of the Software, and to permit persons to whom", "this permission notice shall be included in all copies or substantial portions of", "..scrabTask import ReportTask from utils import containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\"", "\"\"\" The MIT License (MIT) Copyright (c) 2017 <NAME> Permission is hereby granted,", "modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to", "3 self.download = 2 self.readme = 1 self.apis = 3 self.examples = 2", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "{'generalData': {'documentation': { 'completeness': {'apis': False, 'examples': False, 'explanations': False}}}} if (not containedStructure(required,", "it is to use a project. This is based on the following data:", "'exists': {'readme': False, 'website': False, 'download': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData(", "report containing the data to base the estimation on :returns: The ease of", "in self.__limits: if(eou_score <= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "\"\"\" required = {'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if (not containedStructure(required, project_report)):", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "contain something that is useful for the task, but the task has to", "\"\"\" Gathers the projects interface level data :param project_report: The project report to", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "self.readme = None self.apis = None self.examples = None self.explanations = None self.high", "'low': False}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of", "task has to check if it is _there_ as these are user provided.", "of use for the project '{}' with \" \"the report\\n{}\".format( project, projects[project]) )", "report to gather the data from \"\"\" def __init__(self, project_report): self.website = None", "notice and this permission notice shall be included in all copies or substantial", "self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy it is", "self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the documentation existence data :param project_report: The", "= \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to filter specific exceptions \"\"\" pass", "= ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and data.low: score", "data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and data.low: score += self.__weight.high_low elif data.high:", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", ":param name: The name :param lower: The lower :param upper: The upper \"\"\"", "use estimation \"\"\" def __init__(self): self.website = 3 self.download = 2 self.readme 
=", "ease of use calculation is missing - \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel']", "task estimates how easy it is to use a project. :param report: The", "version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to filter specific exceptions \"\"\"", "charge, to any person obtaining a copy of this software and associated documentation", "existence * (completeness + level) :param project_report: The project report containing the data", "_might_ contain something that is useful for the task, but the task has", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "project_report): self.website = None self.download = None self.readme = None self.apis = None", "inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low'] class Weight(object): \"\"\" Convenience", "needed for the ease of use estimation \"\"\" def __init__(self): self.website = 3", "__init__(self, name, lower, upper): self.name = name self.lower = lower self.upper = upper", "estimation for the projects. 
:param project_report: The project report to gather the data", "self.website = None self.download = None self.readme = None self.apis = None self.examples", "MissingManualData( \"Data for the ease of use calculation is missing - \" \"generalData.documentation.exists.*\"", "project_report: The project report containing the data to base the estimation on :returns:", "+= self.__weight.high elif data.low: score += self.__weight.low if data.website: score *= self.__weight.website elif", "eou_score is None: continue for limit in self.__limits: if(eou_score <= limit.upper and eou_score", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "= None self.apis = None self.examples = None self.explanations = None self.high =", "of the project sizes :param name: The name :param lower: The lower :param", "persons to whom the Software is furnished to do so, subject to the", "Gathers the documentation existence data :param project_report: The project report to gather the", "to gather the data from \"\"\" required = {'generalData': {'documentation': { 'completeness': {'apis':", "Software is furnished to do so, subject to the following conditions: The above", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "to use a project. 
:param report: The report to analyse _and_ change :returns:", "= 6 self.high = 5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to", "raise MissingManualData( \"Data for the ease of use calculation is missing - \"", ":param lower: The lower :param upper: The upper \"\"\" def __init__(self, name, lower,", "\"\"\" Gathers the documentation completeness data :param project_report: The project report to gather", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "{ 'completeness': {'apis': False, 'examples': False, 'explanations': False}}}} if (not containedStructure(required, project_report)): raise", "self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ]", "given explicitly for this task, for all projects, defined in the task.yaml :param", "from utils import containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\"", "a project. This is based on the following data: manual: generalData: documentation: exists:", "as e: raise Exception( \"While estimating ease of use for the project '{}'", "on :returns: The ease of use score \"\"\" data = ScoreData(project_report) score =", "Copyright (c) 2017 <NAME> Permission is hereby granted, free of charge, to any", "is to use a project. The basic formula is existence * (completeness +", "level data :param project_report: The project report to gather the data from \"\"\"", "ease of use estimation for the projects. 
:param project_report: The project report to", "lower limits of the project sizes :param name: The name :param lower: The", "to deal in the Software without restriction, including without limitation the rights to", "if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of use calculation", "elif data.download: score *= self.__weight.download elif data.readme: score *= self.__weight.readme return score def", "lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it", "try: for project in projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score", "it is to use a project. The basic formula is existence * (completeness", "for the ease of use estimation \"\"\" def __init__(self): self.website = 3 self.download", "Convenience class that holds all weights needed for the ease of use estimation", "self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the documentation existence data :param project_report:", "self.upper = upper class ScoreData(): \"\"\" Convenience class that gathers the necessary data", "project report to gather the data from \"\"\" required = {'generalData': {'documentation': {", "\"\"\" Helper class to filter specific exceptions \"\"\" pass class Limit(): \"\"\" Convenience", "to whom the Software is furnished to do so, subject to the following", "gather the data from \"\"\" required = {'generalData': {'documentation': { 'completeness': {'apis': False,", "data for the ease of use estimation for the projects. 
:param project_report: The", "data to base the estimation on :returns: The ease of use score \"\"\"", "documentation files (the \"Software\"), to deal in the Software without restriction, including without", "for the ease of use calculation is missing - \" \"generalData.documentation.completeness.*\" ) comp", "project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme'] self.website = exists['website'] def __gather_completness(self, project_report):", "project sizes :param name: The name :param lower: The lower :param upper: The", "has to check if it is _there_ as these are user provided. If", "files (the \"Software\"), to deal in the Software without restriction, including without limitation", "the documentation completeness data :param project_report: The project report to gather the data", "Software without restriction, including without limitation the rights to use, copy, modify, merge,", "'examples': False, 'explanations': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the", "def __gather_interface_level(self, project_report): \"\"\" Gathers the projects interface level data :param project_report: The", "estimating ease of use for the project '{}' with \" \"the report\\n{}\".format( project,", "to do so, subject to the following conditions: The above copyright notice and", "\"\"\" from ..scrabTask import ReportTask from utils import containedStructure name = \"EaseOfUseEstimation\" version", "self.__weight.low if data.website: score *= self.__weight.website elif data.download: score *= self.__weight.download elif data.readme:", "name, lower, upper): self.name = name self.lower = lower self.upper = upper class", "filter specific exceptions \"\"\" pass class Limit(): \"\"\" Convenience class to hold the", "self.download = exists['download'] self.readme = exists['readme'] self.website = exists['website'] def __gather_completness(self, 
project_report): \"\"\"", "The ease of use score \"\"\" data = ScoreData(project_report) score = ( data.apis*self.__weight.apis", "score *= self.__weight.download elif data.readme: score *= self.__weight.readme return score def scrab(self, report):", "{'readme': False, 'website': False, 'download': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data", "in the Software without restriction, including without limitation the rights to use, copy,", "data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and data.low: score += self.__weight.high_low", "missing - \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples =", "The lower :param upper: The upper \"\"\" def __init__(self, name, lower, upper): self.name", "needed to work that check should happen in the argHandler. \"\"\" def __init__(self,", "def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits = [ Limit(name='easy',", "true low: true Example: EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly for this", "download: false completeness: apis: false examples: false explanations: false interfaceLevel: high: true low:", "that will be passed to all tasks. They _might_ contain something that is", "the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "to any person obtaining a copy of this software and associated documentation files", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "upper and lower limits of the project sizes :param name: The name :param", "use estimation for the projects. :param project_report: The project report to gather the", "check if it is _there_ as these are user provided. 
If they are", "= 3 self.examples = 2 self.explanations = 1 self.high_low = 6 self.high =", "Exception( \"While estimating ease of use for the project '{}' with \" \"the", "existence data :param project_report: The project report to gather the data from \"\"\"", "to estimate how easy it is to use a project. This is based", "= inter['low'] class Weight(object): \"\"\" Convenience class that holds all weights needed for", "\"\"\" Convenience class to hold the upper and lower limits of the project", "the ease of use estimation for the projects. :param project_report: The project report", "self.examples = 2 self.explanations = 1 self.high_low = 6 self.high = 5 self.low", "the argHandler. \"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits", "completeness data :param project_report: The project report to gather the data from \"\"\"", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" from", "DEALINGS IN THE SOFTWARE. \"\"\" from ..scrabTask import ReportTask from utils import containedStructure", "\"Data for the ease of use calculation is missing - \" \"generalData.documentation.completeness.*\" )", "= exists['readme'] self.website = exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the documentation completeness", "a copy of this software and associated documentation files (the \"Software\"), to deal", "is missing - \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples", "Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "is to use a project. 
:param report: The report to analyse _and_ change", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "class Limit(): \"\"\" Convenience class to hold the upper and lower limits of", "report to gather the data from \"\"\" required = {'generalData': {'documentation': { 'exists':", "project_report: The project report to gather the data from \"\"\" def __init__(self, project_report):", "= \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to filter specific", "__gather_completness(self, project_report): \"\"\" Gathers the documentation completeness data :param project_report: The project report", "ease of use calculation is missing - \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness']", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "all tasks. They _might_ contain something that is useful for the task, but", "data.low: score += self.__weight.high_low elif data.high: score += self.__weight.high elif data.low: score +=", "lower, upper): self.name = name self.lower = lower self.upper = upper class ScoreData():", "score \"\"\" data = ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations", ") inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low'] class Weight(object): \"\"\"", "sizes :param name: The name :param lower: The lower :param upper: The upper", "scrab task estimates how easy it is to use a project. :param report:", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" from ..scrabTask import", "def __gather_existence(self, project_report): \"\"\" Gathers the documentation existence data :param project_report: The project", "free of charge, to any person obtaining a copy of this software and", "and this permission notice shall be included in all copies or substantial portions", "and to permit persons to whom the Software is furnished to do so,", "that check should happen in the argHandler. \"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation,", "use a project. This is based on the following data: manual: generalData: documentation:", "EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try: for project in projects: project_report =", "for the task, but the task has to check if it is _there_", "= {'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if (not containedStructure(required, project_report)): raise MissingManualData(", "'explanations': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of", "in the argHandler. \"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args)", "on the following data: manual: generalData: documentation: exists: readme: false website: false download:", "\"\"\" projects = report['projects'] try: for project in projects: project_report = projects[project] eou_score", "that gathers the necessary data for the ease of use estimation for the", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "the necessary data for the ease of use estimation for the projects. 
:param", "exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the documentation completeness data :param project_report: The", "exceptions \"\"\" pass class Limit(): \"\"\" Convenience class to hold the upper and", "lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates", "false website: false download: false completeness: apis: false examples: false explanations: false interfaceLevel:", "self.readme = exists['readme'] self.website = exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the documentation", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "project_report)): raise MissingManualData( \"Data for the ease of use calculation is missing -", "interfaceLevel: high: true low: true Example: EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly", "and data.low: score += self.__weight.high_low elif data.high: score += self.__weight.high elif data.low: score", "return score def scrab(self, report): \"\"\" The scrab task estimates how easy it", "the ease of use estimation \"\"\" def __init__(self): self.website = 3 self.download =", "false examples: false explanations: false interfaceLevel: high: true low: true Example: EaseOfUseEstimation: difficult", "False}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of use", "gathers the necessary data for the ease of use estimation for the projects.", "- \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples']", "__init__(self): self.website = 3 self.download = 2 self.readme = 1 self.apis = 3", "= self.__estimate_ease_of_use(project_report) if eou_score is None: continue for limit in self.__limits: if(eou_score <=", "associated documentation files (the 
\"Software\"), to deal in the Software without restriction, including", "elif data.readme: score *= self.__weight.readme return score def scrab(self, report): \"\"\" The scrab", "project report to gather the data from \"\"\" def __init__(self, project_report): self.website =", "project_report): \"\"\" Gathers the documentation existence data :param project_report: The project report to", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and data.low: score +=", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "weights needed for the ease of use estimation \"\"\" def __init__(self): self.website =", "self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the projects interface level data", "= upper class ScoreData(): \"\"\" Convenience class that gathers the necessary data for", "project_report): \"\"\" Gathers the documentation completeness data :param project_report: The project report to", "notice shall be included in all copies or substantial portions of the Software.", "user provided. If they are needed to work that check should happen in", "None self.readme = None self.apis = None self.examples = None self.explanations = None", "are needed to work that check should happen in the argHandler. \"\"\" def", "the projects. 
:param project_report: The project report to gather the data from \"\"\"", "def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it is to use a project.", "ease of use estimation \"\"\" def __init__(self): self.website = 3 self.download = 2", "to hold the upper and lower limits of the project sizes :param name:", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "copy of this software and associated documentation files (the \"Software\"), to deal in", "USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" from ..scrabTask import ReportTask from", "= exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the documentation completeness data :param project_report:", "global_args: Arguments that will be passed to all tasks. They _might_ contain something", "substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "{'generalData': {'documentation': { 'exists': {'readme': False, 'website': False, 'download': False}}}} if (not containedStructure(required,", "a project. The basic formula is existence * (completeness + level) :param project_report:", "for limit in self.__limits: if(eou_score <= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] =", "estimate how easy it is to use a project. This is based on", "easy it is to use a project. 
This is based on the following", "contains all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try: for", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "(MIT) Copyright (c) 2017 <NAME> Permission is hereby granted, free of charge, to", "hold the upper and lower limits of the project sizes :param name: The", ":param upper: The upper \"\"\" def __init__(self, name, lower, upper): self.name = name", "name: The name :param lower: The lower :param upper: The upper \"\"\" def", "None: continue for limit in self.__limits: if(eou_score <= limit.upper and eou_score >= limit.lower):", "_there_ as these are user provided. If they are needed to work that", "obtaining a copy of this software and associated documentation files (the \"Software\"), to", "and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e: raise Exception(", "exists['download'] self.readme = exists['readme'] self.website = exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the", "] self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it is", "{'documentation': { 'completeness': {'apis': False, 'examples': False, 'explanations': False}}}} if (not containedStructure(required, project_report)):", "the task has to check if it is _there_ as these are user", "is missing - \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low", "= report['projects'] try: for project in projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report)", "projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None: continue for", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "of use estimation \"\"\" def __init__(self): self.website = 3 self.download = 2 self.readme", "use calculation is missing - \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download =", "project_report): \"\"\" Gathers the projects interface level data :param project_report: The project report", "the data from \"\"\" required = {'generalData': {'documentation': { 'completeness': {'apis': False, 'examples':", "that holds all weights needed for the ease of use estimation \"\"\" def", "comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the", "that contains all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try:", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "\"\"\" pass class Limit(): \"\"\" Convenience class to hold the upper and lower", "how easy it is to use a project. :param report: The report to", "+ data.explanations*self.__weight.explanations ) if data.high and data.low: score += self.__weight.high_low elif data.high: score", "data.download: score *= self.__weight.download elif data.readme: score *= self.__weight.readme return score def scrab(self,", "None self.examples = None self.explanations = None self.high = None self.low = None", "*= self.__weight.website elif data.download: score *= self.__weight.download elif data.readme: score *= self.__weight.readme return", "\"\"\" The scrab task estimates how easy it is to use a project.", "project. :param report: The report to analyse _and_ change :returns: Report that contains", "parameter: Parameter given explicitly for this task, for all projects, defined in the", "IN THE SOFTWARE. 
\"\"\" from ..scrabTask import ReportTask from utils import containedStructure name", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e: raise Exception( \"While", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", ":returns: The ease of use score \"\"\" data = ScoreData(project_report) score = (", "the following data: manual: generalData: documentation: exists: readme: false website: false download: false", "publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons", "will be passed to all tasks. They _might_ contain something that is useful", "= None self.examples = None self.explanations = None self.high = None self.low =", "from \"\"\" def __init__(self, project_report): self.website = None self.download = None self.readme =", "None self.apis = None self.examples = None self.explanations = None self.high = None", "including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,", "scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try: for project in", "limits of the project sizes :param name: The name :param lower: The lower", "\"\"\" def __init__(self, project_report): self.website = None self.download = None self.readme = None", "lower: The lower :param upper: The upper \"\"\" def __init__(self, name, lower, upper):", "data.low: score += self.__weight.low if data.website: score *= self.__weight.website elif data.download: score *=", "change :returns: Report that contains all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects", "or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "self.high = 5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how", "required = {'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if (not containedStructure(required, project_report)): raise", "how easy it is to use a project. This is based on the", "for all projects, defined in the task.yaml :param global_args: Arguments that will be", "\"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples'] self.explanations =", "all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the documentation existence data :param", "data.readme: score *= self.__weight.readme return score def scrab(self, report): \"\"\" The scrab task", "name self.lower = lower self.upper = upper class ScoreData(): \"\"\" Convenience class that", "e: raise Exception( \"While estimating ease of use for the project '{}' with", "= inter['high'] self.low = inter['low'] class Weight(object): \"\"\" Convenience class that holds all", "required = {'generalData': {'documentation': { 'exists': {'readme': False, 'website': False, 'download': False}}}} if", "lower self.upper = upper class ScoreData(): \"\"\" Convenience class that gathers the necessary", "class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy it is to use a", "the data to base the estimation on :returns: The ease of use score", "The MIT License (MIT) Copyright (c) 2017 <NAME> Permission is hereby granted, free", "\"\"\" Convenience class that holds all weights needed for the ease of use", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING 
BUT NOT LIMITED TO THE WARRANTIES", "+ level) :param project_report: The project report containing the data to base the", "{'documentation': { 'exists': {'readme': False, 'website': False, 'download': False}}}} if (not containedStructure(required, project_report)):", "data :param project_report: The project report to gather the data from \"\"\" required", "= limit.name except Exception as e: raise Exception( \"While estimating ease of use", "documentation existence data :param project_report: The project report to gather the data from", ") comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations']", ":param report: The report to analyse _and_ change :returns: Report that contains all", "(not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of use calculation is", "report: The report to analyse _and_ change :returns: Report that contains all scrabbed", "true Example: EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly for this task, for", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the", "to gather the data from \"\"\" required = {'generalData': {'interfaceLevel': {'high': False, 'low':", "for the projects. :param project_report: The project report to gather the data from", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "data: manual: generalData: documentation: exists: readme: false website: false download: false completeness: apis:", "these are user provided. 
If they are needed to work that check should", "projects, defined in the task.yaml :param global_args: Arguments that will be passed to", "interface level data :param project_report: The project report to gather the data from", "= None self.download = None self.readme = None self.apis = None self.examples =", "= projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None: continue for limit in", "the estimation on :returns: The ease of use score \"\"\" data = ScoreData(project_report)", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "something that is useful for the task, but the task has to check", "it is _there_ as these are user provided. If they are needed to", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "2 self.explanations = 1 self.high_low = 6 self.high = 5 self.low = 1", "formula is existence * (completeness + level) :param project_report: The project report containing", "above copyright notice and this permission notice shall be included in all copies", "from \"\"\" required = {'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if (not containedStructure(required,", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "name :param lower: The lower :param upper: The upper \"\"\" def __init__(self, name,", "= exists['download'] self.readme = exists['readme'] self.website = exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers", "task.yaml :param global_args: Arguments that will be passed to all tasks. They _might_", "a project. 
:param report: The report to analyse _and_ change :returns: Report that", "= comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the projects interface level data :param", "= 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy it is to", "they are needed to work that check should happen in the argHandler. \"\"\"", "\"\"\" data = ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations )", "2017 <NAME> Permission is hereby granted, free of charge, to any person obtaining", "that is useful for the task, but the task has to check if", "'completeness': {'apis': False, 'examples': False, 'explanations': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData(", ":param parameter: Parameter given explicitly for this task, for all projects, defined in", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "permission notice shall be included in all copies or substantial portions of the", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS", "of use calculation is missing - \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high", "of use score \"\"\" data = ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples", "the documentation existence data :param project_report: The project report to gather the data", "version, parameter, global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult',", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "self.name = name self.lower = lower self.upper = upper class ScoreData(): \"\"\" Convenience", "\"\"\" Gathers the documentation existence data :param project_report: The project report to gather", "is None: continue for limit in self.__limits: if(eou_score <= limit.upper and eou_score >=", "upper): self.name = name self.lower = lower self.upper = upper class ScoreData(): \"\"\"", "task, but the task has to check if it is _there_ as these", "the following conditions: The above copyright notice and this permission notice shall be", "continue for limit in self.__limits: if(eou_score <= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation']", "Arguments that will be passed to all tasks. 
They _might_ contain something that", "\"Data for the ease of use calculation is missing - \" \"generalData.documentation.exists.*\" )", "_and_ change :returns: Report that contains all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\"", "'download': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of", "= 2 self.readme = 1 self.apis = 3 self.examples = 2 self.explanations =", "Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try: for project in projects: project_report", "= {'generalData': {'documentation': { 'exists': {'readme': False, 'website': False, 'download': False}}}} if (not", "self.__weight.high_low elif data.high: score += self.__weight.high elif data.low: score += self.__weight.low if data.website:", "self.apis = comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\"", "to use a project. This is based on the following data: manual: generalData:", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "If they are needed to work that check should happen in the argHandler.", "use a project. :param report: The report to analyse _and_ change :returns: Report", "furnished to do so, subject to the following conditions: The above copyright notice", "\"Data for the ease of use calculation is missing - \" \"generalData.interfaceLevel.*\" )", "the task.yaml :param global_args: Arguments that will be passed to all tasks. 
They", "\"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to filter specific exceptions \"\"\" pass class", "from \"\"\" required = {'generalData': {'documentation': { 'completeness': {'apis': False, 'examples': False, 'explanations':", "score def scrab(self, report): \"\"\" The scrab task estimates how easy it is", "+= self.__weight.low if data.website: score *= self.__weight.website elif data.download: score *= self.__weight.download elif", "estimation on :returns: The ease of use score \"\"\" data = ScoreData(project_report) score", "permit persons to whom the Software is furnished to do so, subject to", "'website': False, 'download': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the", "{'interfaceLevel': {'high': False, 'low': False}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for", "any person obtaining a copy of this software and associated documentation files (the", "ReportTask from utils import containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception):", "gather the data from \"\"\" required = {'generalData': {'documentation': { 'exists': {'readme': False,", "= {'generalData': {'documentation': { 'completeness': {'apis': False, 'examples': False, 'explanations': False}}}} if (not", "= 2 self.explanations = 1 self.high_low = 6 self.high = 5 self.low =", "copies of the Software, and to permit persons to whom the Software is", "is existence * (completeness + level) :param project_report: The project report containing the", "The project report containing the data to base the estimation on :returns: The", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "project. 
This is based on the following data: manual: generalData: documentation: exists: readme:", "Helper class to filter specific exceptions \"\"\" pass class Limit(): \"\"\" Convenience class", "except Exception as e: raise Exception( \"While estimating ease of use for the", "the upper and lower limits of the project sizes :param name: The name", "data.high: score += self.__weight.high elif data.low: score += self.__weight.low if data.website: score *=", "included in all copies or substantial portions of the Software. THE SOFTWARE IS", "= [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight", "\"While estimating ease of use for the project '{}' with \" \"the report\\n{}\".format(", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and", "def scrab(self, report): \"\"\" The scrab task estimates how easy it is to", "SOFTWARE. \"\"\" from ..scrabTask import ReportTask from utils import containedStructure name = \"EaseOfUseEstimation\"", "(completeness + level) :param project_report: The project report containing the data to base", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal',", "{'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" from ..scrabTask import ReportTask", "the Software, and to permit persons to whom the Software is furnished to", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "basic formula is existence * (completeness + level) :param project_report: The project report", "happen in the argHandler. 
\"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter,", "report['projects'] try: for project in projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,", "upper \"\"\" def __init__(self, name, lower, upper): self.name = name self.lower = lower", "Gathers the projects interface level data :param project_report: The project report to gather", "= Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it is to use", "projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e: raise Exception( \"While estimating ease of", "documentation: exists: readme: false website: false download: false completeness: apis: false examples: false", "following conditions: The above copyright notice and this permission notice shall be included", "project_report): \"\"\" Estimates how easy it is to use a project. The basic", "EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly for this task, for all projects,", "\"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits = [", "to analyse _and_ change :returns: Report that contains all scrabbed information Example: EaseOfUseEstimation:", "data.high and data.low: score += self.__weight.high_low elif data.high: score += self.__weight.high elif data.low:", "name = \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to filter", "copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\",", ":param project_report: The project report to gather the data from \"\"\" required =", "The report to analyse _and_ change :returns: Report that contains all scrabbed information", "Convenience class to hold the upper and lower limits of the project sizes", "ScoreData(): \"\"\" Convenience class that gathers the necessary data for the ease of", "class that gathers the necessary data for the ease of use estimation for", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None: continue for limit in self.__limits:", "project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None: continue for limit", ":param project_report: The project report to gather the data from \"\"\" def __init__(self,", "The above copyright notice and this permission notice shall be included in all", "required = {'generalData': {'documentation': { 'completeness': {'apis': False, 'examples': False, 'explanations': False}}}} if", "task, for all projects, defined in the task.yaml :param global_args: Arguments that will", "eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None: continue for limit in self.__limits: if(eou_score", "difficult \"\"\" projects = report['projects'] try: for project in projects: project_report = projects[project]", "to work that check should happen in the argHandler. \"\"\" def __init__(self, parameter,", "the data from \"\"\" def __init__(self, project_report): self.website = None self.download = None", "report): \"\"\" The scrab task estimates how easy it is to use a", "necessary data for the ease of use estimation for the projects. 
:param project_report:", "for project in projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is", "The upper \"\"\" def __init__(self, name, lower, upper): self.name = name self.lower =", "lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self,", "self.__weight.download elif data.readme: score *= self.__weight.readme return score def scrab(self, report): \"\"\" The", "scrab(self, report): \"\"\" The scrab task estimates how easy it is to use", "__init__(self, project_report): self.website = None self.download = None self.readme = None self.apis =", ") if data.high and data.low: score += self.__weight.high_low elif data.high: score += self.__weight.high", "self.high_low = 6 self.high = 5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class", "useful for the task, but the task has to check if it is", "\"Software\"), to deal in the Software without restriction, including without limitation the rights", "ease of use calculation is missing - \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists']", "self.examples = comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the projects", "Estimates how easy it is to use a project. 
The basic formula is", "deal in the Software without restriction, including without limitation the rights to use,", "self.high = None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\"", "granted, free of charge, to any person obtaining a copy of this software", "= None self.readme = None self.apis = None self.examples = None self.explanations =", "limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "project '{}' with \" \"the report\\n{}\".format( project, projects[project]) ) from e return report", "Limit(): \"\"\" Convenience class to hold the upper and lower limits of the", "data from \"\"\" required = {'generalData': {'documentation': { 'completeness': {'apis': False, 'examples': False,", "6 self.high = 5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate", "global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1)", "exists['readme'] self.website = exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the documentation completeness data", "{'high': False, 'low': False}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the", "__estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy it is to use a project. The", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and", "calculation is missing - \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis']", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "of this software and associated documentation files (the \"Software\"), to deal in the", "= 1 self.apis = 3 self.examples = 2 self.explanations = 1 self.high_low =", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "elif data.low: score += self.__weight.low if data.website: score *= self.__weight.website elif data.download: score", "to all tasks. They _might_ contain something that is useful for the task,", ">= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e: raise Exception( \"While estimating", "are user provided. 
If they are needed to work that check should happen", ":returns: Report that contains all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects =", "sell copies of the Software, and to permit persons to whom the Software", "\" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples'] self.explanations", "low: true Example: EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly for this task,", "the data from \"\"\" required = {'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if", "self.__limits: if(eou_score <= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception", "limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e: raise Exception( \"While estimating ease", "if it is _there_ as these are user provided. If they are needed", "inter['high'] self.low = inter['low'] class Weight(object): \"\"\" Convenience class that holds all weights", "score *= self.__weight.website elif data.download: score *= self.__weight.download elif data.readme: score *= self.__weight.readme", "Exception as e: raise Exception( \"While estimating ease of use for the project", "import containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class", "2 self.readme = 1 self.apis = 3 self.examples = 2 self.explanations = 1", "self.lower = lower self.upper = upper class ScoreData(): \"\"\" Convenience class that gathers", "do so, subject to the following conditions: The above copyright notice and this", "use calculation is missing - \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis =", "completeness: apis: false examples: false explanations: false interfaceLevel: high: true low: true 
Example:", "argHandler. \"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits =", "containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of use calculation is missing", "Gathers the documentation completeness data :param project_report: The project report to gather the", "THE SOFTWARE. \"\"\" from ..scrabTask import ReportTask from utils import containedStructure name =", "limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e: raise", "OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" from ..scrabTask import ReportTask from utils", "high: true low: true Example: EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly for", "is furnished to do so, subject to the following conditions: The above copyright", "self).__init__(name, version, parameter, global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9),", "report to gather the data from \"\"\" required = {'generalData': {'interfaceLevel': {'high': False,", "to base the estimation on :returns: The ease of use score \"\"\" data", "= project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme'] self.website = exists['website'] def __gather_completness(self,", "Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self, project_report):", "false completeness: apis: false examples: false explanations: false interfaceLevel: high: true low: true", "<= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as e:", "project in projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None:", "easy it is to use a 
project. :param report: The report to analyse", "The project report to gather the data from \"\"\" required = {'generalData': {'interfaceLevel':", ":param global_args: Arguments that will be passed to all tasks. They _might_ contain", "[ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight =", "comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the projects interface level data :param project_report:", "so, subject to the following conditions: The above copyright notice and this permission", "work that check should happen in the argHandler. \"\"\" def __init__(self, parameter, global_args):", "containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to", "Parameter given explicitly for this task, for all projects, defined in the task.yaml", "comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers the projects interface level", "difficult :param parameter: Parameter given explicitly for this task, for all projects, defined", "tasks. They _might_ contain something that is useful for the task, but the", "false download: false completeness: apis: false examples: false explanations: false interfaceLevel: high: true", "The project report to gather the data from \"\"\" required = {'generalData': {'documentation':", "{ 'exists': {'readme': False, 'website': False, 'download': False}}}} if (not containedStructure(required, project_report)): raise", "self.high = inter['high'] self.low = inter['low'] class Weight(object): \"\"\" Convenience class that holds", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "how easy it is to use a project. 
The basic formula is existence", "to gather the data from \"\"\" required = {'generalData': {'documentation': { 'exists': {'readme':", "= project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low'] class Weight(object): \"\"\" Convenience class", "use calculation is missing - \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high =", "\"\"\" required = {'generalData': {'documentation': { 'exists': {'readme': False, 'website': False, 'download': False}}}}", "\"\"\" def __init__(self): self.website = 3 self.download = 2 self.readme = 1 self.apis", "of the Software, and to permit persons to whom the Software is furnished", "parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17),", "and/or sell copies of the Software, and to permit persons to whom the", "projects. :param project_report: The project report to gather the data from \"\"\" def", "false explanations: false interfaceLevel: high: true low: true Example: EaseOfUseEstimation: difficult :param parameter:", "upper: The upper \"\"\" def __init__(self, name, lower, upper): self.name = name self.lower", "\"\"\" def __init__(self, name, lower, upper): self.name = name self.lower = lower self.upper", "data from \"\"\" required = {'generalData': {'documentation': { 'exists': {'readme': False, 'website': False,", "explicitly for this task, for all projects, defined in the task.yaml :param global_args:", "= None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers", "= comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self, project_report): \"\"\" Gathers", "of charge, to any person obtaining a copy of this software 
and associated", "(the \"Software\"), to deal in the Software without restriction, including without limitation the", "self.apis = None self.examples = None self.explanations = None self.high = None self.low", "self.explanations = 1 self.high_low = 6 self.high = 5 self.low = 1 class", "is useful for the task, but the task has to check if it", "use a project. The basic formula is existence * (completeness + level) :param", "ease of use score \"\"\" data = ScoreData(project_report) score = ( data.apis*self.__weight.apis +", "for the ease of use calculation is missing - \" \"generalData.interfaceLevel.*\" ) inter", "base the estimation on :returns: The ease of use score \"\"\" data =", "copyright notice and this permission notice shall be included in all copies or", "data from \"\"\" def __init__(self, project_report): self.website = None self.download = None self.readme", "The basic formula is existence * (completeness + level) :param project_report: The project", "all projects, defined in the task.yaml :param global_args: Arguments that will be passed", "5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy it", "to permit persons to whom the Software is furnished to do so, subject", "of use estimation for the projects. :param project_report: The project report to gather", "- \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low']", "(c) 2017 <NAME> Permission is hereby granted, free of charge, to any person", "calculation is missing - \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high = inter['high']", "to filter specific exceptions \"\"\" pass class Limit(): \"\"\" Convenience class to hold", "check should happen in the argHandler. \"\"\" def __init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name,", "for the ease of use estimation for the projects. 
:param project_report: The project", "conditions: The above copyright notice and this permission notice shall be included in", "{'apis': False, 'examples': False, 'explanations': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data", "from ..scrabTask import ReportTask from utils import containedStructure name = \"EaseOfUseEstimation\" version =", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "class ScoreData(): \"\"\" Convenience class that gathers the necessary data for the ease", "estimation \"\"\" def __init__(self): self.website = 3 self.download = 2 self.readme = 1", "upper class ScoreData(): \"\"\" Convenience class that gathers the necessary data for the", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "Permission is hereby granted, free of charge, to any person obtaining a copy", "following data: manual: generalData: documentation: exists: readme: false website: false download: false completeness:", "comp = project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations'] def", "be included in all copies or substantial portions of the Software. 
THE SOFTWARE", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "= project_report['generalData']['documentation']['completeness'] self.apis = comp['apis'] self.examples = comp['examples'] self.explanations = comp['explanations'] def __gather_interface_level(self,", "specific exceptions \"\"\" pass class Limit(): \"\"\" Convenience class to hold the upper", "+ data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high and data.low: score += self.__weight.high_low elif", "whom the Software is furnished to do so, subject to the following conditions:", "class that holds all weights needed for the ease of use estimation \"\"\"", "self.__estimate_ease_of_use(project_report) if eou_score is None: continue for limit in self.__limits: if(eou_score <= limit.upper", "data.explanations*self.__weight.explanations ) if data.high and data.low: score += self.__weight.high_low elif data.high: score +=", "= ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if data.high", "+= self.__weight.high_low elif data.high: score += self.__weight.high elif data.low: score += self.__weight.low if", "False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease of use", "the ease of use calculation is missing - \" \"generalData.interfaceLevel.*\" ) inter =", "self.apis = 3 self.examples = 2 self.explanations = 1 self.high_low = 6 self.high", "information Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try: for project in projects:", "score += self.__weight.high elif data.low: score += self.__weight.low if data.website: score *= self.__weight.website", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\"", "the data from \"\"\" required = {'generalData': {'documentation': { 'exists': {'readme': False, 'website':", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "\"\"\" Convenience class that gathers the necessary data for the ease of use", "the ease of use calculation is missing - \" \"generalData.documentation.completeness.*\" ) comp =", "__init__(self, parameter, global_args): super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits = [ Limit(name='easy', upper=24,", "\" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme'] self.website", "level) :param project_report: The project report containing the data to base the estimation", "parameter, global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8,", "MissingManualData( \"Data for the ease of use calculation is missing - \" \"generalData.documentation.completeness.*\"", "score += self.__weight.low if data.website: score *= self.__weight.website elif data.download: score *= self.__weight.download", "class MissingManualData(Exception): \"\"\" Helper class to filter specific exceptions \"\"\" pass class Limit():", "MissingManualData(Exception): \"\"\" Helper class to filter specific exceptions \"\"\" pass class Limit(): \"\"\"", "portions of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "self.low = inter['low'] class Weight(object): \"\"\" Convenience class that holds all weights needed", "calculation is missing - \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download = exists['download']", "super(EaseOfUseEstimation, self).__init__(name, version, parameter, global_args) self.__limits = [ Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16,", "readme: false website: false download: false completeness: apis: false examples: false explanations: false", "utils import containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper", "based on the following data: manual: generalData: documentation: exists: readme: false website: false", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "distribute, sublicense, and/or sell copies of the Software, and to permit persons to", "of the Software. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "self.__weight.readme return score def scrab(self, report): \"\"\" The scrab task estimates how easy", "1 class EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy it is to use", "software and associated documentation files (the \"Software\"), to deal in the Software without", "data from \"\"\" required = {'generalData': {'interfaceLevel': {'high': False, 'low': False}}} if (not", "the project '{}' with \" \"the report\\n{}\".format( project, projects[project]) ) from e return", "self.__weight.high elif data.low: score += self.__weight.low if data.website: score *= self.__weight.website elif data.download:", "import ReportTask from utils import containedStructure name = \"EaseOfUseEstimation\" version = \"1.0.0\" class", "explanations: false interfaceLevel: high: true low: true Example: EaseOfUseEstimation: difficult :param parameter: Parameter", "= None self.explanations = None self.high = None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report)", "if data.website: score *= self.__weight.website elif data.download: score *= self.__weight.download elif data.readme: score", "upper=8, lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\" Estimates how easy", "is to use a project. This is based on the following data: manual:", "shall be included in all copies or substantial portions of the Software. 
THE", "self.website = exists['website'] def __gather_completness(self, project_report): \"\"\" Gathers the documentation completeness data :param", "*= self.__weight.readme return score def scrab(self, report): \"\"\" The scrab task estimates how", "defined in the task.yaml :param global_args: Arguments that will be passed to all", "manual: generalData: documentation: exists: readme: false website: false download: false completeness: apis: false", ") exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme'] self.website = exists['website']", "self.download = None self.readme = None self.apis = None self.examples = None self.explanations", "to use a project. The basic formula is existence * (completeness + level)", "and lower limits of the project sizes :param name: The name :param lower:", "all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects'] try: for project", "projects interface level data :param project_report: The project report to gather the data", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "from \"\"\" required = {'generalData': {'documentation': { 'exists': {'readme': False, 'website': False, 'download':", "False, 'explanations': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the documentation", "gather the data from \"\"\" def __init__(self, project_report): self.website = None self.download =", "is based on the following data: manual: generalData: documentation: 
exists: readme: false website:", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "the project sizes :param name: The name :param lower: The lower :param upper:", "for this task, for all projects, defined in the task.yaml :param global_args: Arguments", "class to hold the upper and lower limits of the project sizes :param", "as these are user provided. If they are needed to work that check", "= None self.high = None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self,", "False, 'download': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease", "pass class Limit(): \"\"\" Convenience class to hold the upper and lower limits", "Example: EaseOfUseEstimation: difficult :param parameter: Parameter given explicitly for this task, for all", "the Software is furnished to do so, subject to the following conditions: The", "def __init__(self, name, lower, upper): self.name = name self.lower = lower self.upper =", "* (completeness + level) :param project_report: The project report containing the data to", "MIT License (MIT) Copyright (c) 2017 <NAME> Permission is hereby granted, free of", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "the projects interface level data :param project_report: The project report to gather the", "exists: readme: false website: false download: false completeness: apis: false examples: false explanations:", "this task, for all projects, defined in the task.yaml :param global_args: Arguments that", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "data = ScoreData(project_report) score = ( data.apis*self.__weight.apis + data.examples*self.__weight.examples + data.explanations*self.__weight.explanations ) if", "is missing - \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme", "self.examples = None self.explanations = None self.high = None self.low = None self.__gather_existence(project_report)", "but the task has to check if it is _there_ as these are", "The name :param lower: The lower :param upper: The upper \"\"\" def __init__(self,", "report to gather the data from \"\"\" required = {'generalData': {'documentation': { 'completeness':", "subject to the following conditions: The above copyright notice and this permission notice", "PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "inter['low'] class Weight(object): \"\"\" Convenience class that holds all weights needed for the", "project report to gather the data from \"\"\" required = {'generalData': {'interfaceLevel': {'high':", "EaseOfUseEstimation(ReportTask): \"\"\" Class to estimate how easy it is to use a project.", "if eou_score is None: continue for limit in self.__limits: if(eou_score <= limit.upper and", "documentation completeness data :param project_report: The project report to gather the data from", "if(eou_score <= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name except Exception as", "def __init__(self, project_report): self.website = None self.download = None self.readme = None self.apis", "def __gather_completness(self, project_report): \"\"\" Gathers the documentation completeness data :param project_report: The project", "False, 'website': False, 'download': False}}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for", "the task, but the task has to check if it is _there_ as", "Report that contains all scrabbed information Example: EaseOfUseEstimation: difficult \"\"\" projects = report['projects']", "__gather_interface_level(self, project_report): \"\"\" Gathers the projects interface level data :param project_report: The project", "gather the data from \"\"\" required = {'generalData': {'interfaceLevel': {'high': False, 'low': False}}}", "to check if it is _there_ as these are user provided. 
If they", "is hereby granted, free of charge, to any person obtaining a copy of", "class to filter specific exceptions \"\"\" pass class Limit(): \"\"\" Convenience class to", "all weights needed for the ease of use estimation \"\"\" def __init__(self): self.website", "and associated documentation files (the \"Software\"), to deal in the Software without restriction,", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "Limit(name='easy', upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight()", "raise Exception( \"While estimating ease of use for the project '{}' with \"", "__gather_existence(self, project_report): \"\"\" Gathers the documentation existence data :param project_report: The project report", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "MissingManualData( \"Data for the ease of use calculation is missing - \" \"generalData.interfaceLevel.*\"", "use for the project '{}' with \" \"the report\\n{}\".format( project, projects[project]) ) from", "self.readme = 1 self.apis = 3 self.examples = 2 self.explanations = 1 self.high_low", "\"\"\" Class to estimate how easy it is to use a project. This", "missing - \" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low =", "upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight() def __estimate_ease_of_use(self, project_report): \"\"\"", "They _might_ contain something that is useful for the task, but the task", "hereby granted, free of charge, to any person obtaining a copy of this", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "Convenience class that gathers the necessary data for the ease of use estimation", "is _there_ as these are user provided. 
If they are needed to work", "be passed to all tasks. They _might_ contain something that is useful for", "3 self.examples = 2 self.explanations = 1 self.high_low = 6 self.high = 5", "limit in self.__limits: if(eou_score <= limit.upper and eou_score >= limit.lower): projects[project]['EaseOfUseEstimation'] = limit.name", "1 self.apis = 3 self.examples = 2 self.explanations = 1 self.high_low = 6", "provided. If they are needed to work that check should happen in the", "None self.download = None self.readme = None self.apis = None self.examples = None", "False, 'low': False}}} if (not containedStructure(required, project_report)): raise MissingManualData( \"Data for the ease", "self.download = 2 self.readme = 1 self.apis = 3 self.examples = 2 self.explanations", "data.website: score *= self.__weight.website elif data.download: score *= self.__weight.download elif data.readme: score *=", "restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute,", "in projects: project_report = projects[project] eou_score = self.__estimate_ease_of_use(project_report) if eou_score is None: continue", "= name self.lower = lower self.upper = upper class ScoreData(): \"\"\" Convenience class", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "OTHER DEALINGS IN THE SOFTWARE. \"\"\" from ..scrabTask import ReportTask from utils import", "to the following conditions: The above copyright notice and this permission notice shall", "The project report to gather the data from \"\"\" def __init__(self, project_report): self.website", "apis: false examples: false explanations: false interfaceLevel: high: true low: true Example: EaseOfUseEstimation:", "limit.name except Exception as e: raise Exception( \"While estimating ease of use for", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "Software, and to permit persons to whom the Software is furnished to do", "it is to use a project. 
:param report: The report to analyse _and_", "elif data.high: score += self.__weight.high elif data.low: score += self.__weight.low if data.website: score", "if data.high and data.low: score += self.__weight.high_low elif data.high: score += self.__weight.high elif", "self.__weight.website elif data.download: score *= self.__weight.download elif data.readme: score *= self.__weight.readme return score", "examples: false explanations: false interfaceLevel: high: true low: true Example: EaseOfUseEstimation: difficult :param", "1 self.high_low = 6 self.high = 5 self.low = 1 class EaseOfUseEstimation(ReportTask): \"\"\"", "upper=24, lower=17), Limit(name='normal', upper=16, lower=9), Limit(name='difficult', upper=8, lower=1) ] self.__weight = Weight() def", "analyse _and_ change :returns: Report that contains all scrabbed information Example: EaseOfUseEstimation: difficult", "= 3 self.download = 2 self.readme = 1 self.apis = 3 self.examples =", "generalData: documentation: exists: readme: false website: false download: false completeness: apis: false examples:", "holds all weights needed for the ease of use estimation \"\"\" def __init__(self):", "report to analyse _and_ change :returns: Report that contains all scrabbed information Example:", "in the task.yaml :param global_args: Arguments that will be passed to all tasks.", "website: false download: false completeness: apis: false examples: false explanations: false interfaceLevel: high:", "def __init__(self): self.website = 3 self.download = 2 self.readme = 1 self.apis =", "<reponame>Eyenseo/gitScrabber<filename>gitScrabber/scrabTasks/report/easeOfUseEstimation.py \"\"\" The MIT License (MIT) Copyright (c) 2017 <NAME> Permission is hereby", "\" \"generalData.interfaceLevel.*\" ) inter = project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low'] class", "<NAME> Permission is hereby granted, free of charge, to any person obtaining a", "project. 
The basic formula is existence * (completeness + level) :param project_report: The", "- \" \"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme']", "None self.explanations = None self.high = None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report)", "project_report['generalData']['interfaceLevel'] self.high = inter['high'] self.low = inter['low'] class Weight(object): \"\"\" Convenience class that", "easy it is to use a project. The basic formula is existence *", "\"generalData.documentation.exists.*\" ) exists = project_report['generalData']['documentation']['exists'] self.download = exists['download'] self.readme = exists['readme'] self.website =", "*= self.__weight.download elif data.readme: score *= self.__weight.readme return score def scrab(self, report): \"\"\"", "\"EaseOfUseEstimation\" version = \"1.0.0\" class MissingManualData(Exception): \"\"\" Helper class to filter specific exceptions", "None self.low = None self.__gather_existence(project_report) self.__gather_completness(project_report) self.__gather_interface_level(project_report) def __gather_existence(self, project_report): \"\"\" Gathers the", "of use calculation is missing - \" \"generalData.documentation.completeness.*\" ) comp = project_report['generalData']['documentation']['completeness'] self.apis" ]
[ "self.selection = selection self.combination = combination self.mutation = mutation self.solution = solution self.goal_error", "# Template method population = self.population_generation() min_error = self.solution(population).error for i in range(self.num_iter):", "num_iter self.selection = selection self.combination = combination self.mutation = mutation self.solution = solution", "mutation, solution, goal_error): self.population_generation = population_generation self.num_iter = num_iter self.selection = selection self.combination", "= self.solution(population).error if i % 1000 == 0: if iteration_min_error < min_error: min_error", "iteration_min_error print(\"Found new best, iteration = {}, {}\".format( i, self.solution(population))) if iteration_min_error <", "= num_iter self.selection = selection self.combination = combination self.mutation = mutation self.solution =", "from individual import Individual class GeneticAlgorithm: def __init__(self, population_generation, num_iter, selection, combination, mutation,", "{}, {}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached goal error, terminating.\") break", "selection self.combination = combination self.mutation = mutation self.solution = solution self.goal_error = goal_error", "i % 1000 == 0: if iteration_min_error < min_error: min_error = iteration_min_error print(\"Found", "population = self.population_generation() min_error = self.solution(population).error for i in range(self.num_iter): population, comb_population =", "self.mutation = mutation self.solution = solution self.goal_error = goal_error def evolution(self, neural_net): #", "evolution(self, neural_net): # Template method population = self.population_generation() min_error = self.solution(population).error for i", "self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if i % 1000 == 0: if iteration_min_error", 
"population_generation self.num_iter = num_iter self.selection = selection self.combination = combination self.mutation = mutation", "GeneticAlgorithm: def __init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error): self.population_generation = population_generation", "goal_error): self.population_generation = population_generation self.num_iter = num_iter self.selection = selection self.combination = combination", "new best, iteration = {}, {}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached", "self.solution(population).error for i in range(self.num_iter): population, comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population", "self.num_iter = num_iter self.selection = selection self.combination = combination self.mutation = mutation self.solution", "selection, combination, mutation, solution, goal_error): self.population_generation = population_generation self.num_iter = num_iter self.selection =", "for i in range(self.num_iter): population, comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population =", "neural_net): # Template method population = self.population_generation() min_error = self.solution(population).error for i in", "1000 == 0: if iteration_min_error < min_error: min_error = iteration_min_error print(\"Found new best,", "import Individual class GeneticAlgorithm: def __init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error):", "self.solution = solution self.goal_error = goal_error def evolution(self, neural_net): # Template method population", "__init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error): self.population_generation = population_generation self.num_iter =", "mutated_population = self.mutation(combined_population) 
population.extend(mutated_population) iteration_min_error = self.solution(population).error if i % 1000 == 0:", "% 1000 == 0: if iteration_min_error < min_error: min_error = iteration_min_error print(\"Found new", "= selection self.combination = combination self.mutation = mutation self.solution = solution self.goal_error =", "self.solution(population).error if i % 1000 == 0: if iteration_min_error < min_error: min_error =", "min_error = self.solution(population).error for i in range(self.num_iter): population, comb_population = self.selection(population) combined_population =", "in range(self.num_iter): population, comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population)", "= goal_error def evolution(self, neural_net): # Template method population = self.population_generation() min_error =", "def evolution(self, neural_net): # Template method population = self.population_generation() min_error = self.solution(population).error for", "min_error = iteration_min_error print(\"Found new best, iteration = {}, {}\".format( i, self.solution(population))) if", "Template method population = self.population_generation() min_error = self.solution(population).error for i in range(self.num_iter): population,", "self.combination = combination self.mutation = mutation self.solution = solution self.goal_error = goal_error def", "method population = self.population_generation() min_error = self.solution(population).error for i in range(self.num_iter): population, comb_population", "solution self.goal_error = goal_error def evolution(self, neural_net): # Template method population = self.population_generation()", "= combination self.mutation = mutation self.solution = solution self.goal_error = goal_error def evolution(self,", "= {}, {}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached goal error, 
terminating.\")", "population.extend(mutated_population) iteration_min_error = self.solution(population).error if i % 1000 == 0: if iteration_min_error <", "if iteration_min_error < min_error: min_error = iteration_min_error print(\"Found new best, iteration = {},", "= self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if i % 1000", "i in range(self.num_iter): population, comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population)", "iteration_min_error = self.solution(population).error if i % 1000 == 0: if iteration_min_error < min_error:", "min_error: min_error = iteration_min_error print(\"Found new best, iteration = {}, {}\".format( i, self.solution(population)))", "== 0: if iteration_min_error < min_error: min_error = iteration_min_error print(\"Found new best, iteration", "goal_error def evolution(self, neural_net): # Template method population = self.population_generation() min_error = self.solution(population).error", "< min_error: min_error = iteration_min_error print(\"Found new best, iteration = {}, {}\".format( i,", "best, iteration = {}, {}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached goal", "iteration_min_error < min_error: min_error = iteration_min_error print(\"Found new best, iteration = {}, {}\".format(", "solution, goal_error): self.population_generation = population_generation self.num_iter = num_iter self.selection = selection self.combination =", "0: if iteration_min_error < min_error: min_error = iteration_min_error print(\"Found new best, iteration =", "self.population_generation() min_error = self.solution(population).error for i in range(self.num_iter): population, comb_population = self.selection(population) combined_population", "combined_population = 
self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if i %", "combination self.mutation = mutation self.solution = solution self.goal_error = goal_error def evolution(self, neural_net):", "Individual class GeneticAlgorithm: def __init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error): self.population_generation", "class GeneticAlgorithm: def __init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error): self.population_generation =", "= iteration_min_error print(\"Found new best, iteration = {}, {}\".format( i, self.solution(population))) if iteration_min_error", "self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if i % 1000 ==", "mutation self.solution = solution self.goal_error = goal_error def evolution(self, neural_net): # Template method", "= solution self.goal_error = goal_error def evolution(self, neural_net): # Template method population =", "{}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached goal error, terminating.\") break return", "individual import Individual class GeneticAlgorithm: def __init__(self, population_generation, num_iter, selection, combination, mutation, solution,", "combination, mutation, solution, goal_error): self.population_generation = population_generation self.num_iter = num_iter self.selection = selection", "self.population_generation = population_generation self.num_iter = num_iter self.selection = selection self.combination = combination self.mutation", "= self.solution(population).error for i in range(self.num_iter): population, comb_population = self.selection(population) combined_population = self.combination(comb_population)", "= 
self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if i % 1000 == 0: if", "if i % 1000 == 0: if iteration_min_error < min_error: min_error = iteration_min_error", "self.goal_error = goal_error def evolution(self, neural_net): # Template method population = self.population_generation() min_error", "self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if i", "population, comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error =", "num_iter, selection, combination, mutation, solution, goal_error): self.population_generation = population_generation self.num_iter = num_iter self.selection", "= self.population_generation() min_error = self.solution(population).error for i in range(self.num_iter): population, comb_population = self.selection(population)", "comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error", "i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached goal error, terminating.\") break return self.solution(population)", "= self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error = self.solution(population).error if", "= mutation self.solution = solution self.goal_error = goal_error def evolution(self, neural_net): # Template", "population_generation, num_iter, selection, combination, 
mutation, solution, goal_error): self.population_generation = population_generation self.num_iter = num_iter", "iteration = {}, {}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error: print(\"Reached goal error,", "print(\"Found new best, iteration = {}, {}\".format( i, self.solution(population))) if iteration_min_error < self.goal_error:", "def __init__(self, population_generation, num_iter, selection, combination, mutation, solution, goal_error): self.population_generation = population_generation self.num_iter", "range(self.num_iter): population, comb_population = self.selection(population) combined_population = self.combination(comb_population) mutated_population = self.mutation(combined_population) population.extend(mutated_population) iteration_min_error", "= population_generation self.num_iter = num_iter self.selection = selection self.combination = combination self.mutation =" ]
[]
[ "body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e: print e def send_message(self, to_number,", "import twilio import twilio.rest class Notify(object): def __init__(self, account_sid, auth_token, phone_number): self.account_sid =", "account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token = auth_token self.phone_number = phone_number def", "_request(self, to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number,", "self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e: print", "message = client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e: print e", "client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e: print e def send_message(self,", "self.phone_number = phone_number def _request(self, to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message", "= phone_number def _request(self, to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message =", "__init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token = auth_token self.phone_number = phone_number", "= auth_token self.phone_number = phone_number def _request(self, to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid,", "twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e:", "account_sid self.auth_token = auth_token self.phone_number = phone_number def _request(self, to_number, message): try: client", "def _request(self, 
to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message,", "except twilio.TwilioRestException as e: print e def send_message(self, to_number, message): return self._request(to_number, message)", "message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number )", "to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number", "self.auth_token = auth_token self.phone_number = phone_number def _request(self, to_number, message): try: client =", "twilio import twilio.rest class Notify(object): def __init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid", "auth_token, phone_number): self.account_sid = account_sid self.auth_token = auth_token self.phone_number = phone_number def _request(self,", "try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number ) except", "import twilio.rest class Notify(object): def __init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token", "= twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as", ") except twilio.TwilioRestException as e: print e def send_message(self, to_number, message): return self._request(to_number,", "client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException", "from_=self.phone_number ) except twilio.TwilioRestException as e: print e def send_message(self, 
to_number, message): return", "auth_token self.phone_number = phone_number def _request(self, to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token)", "phone_number def _request(self, to_number, message): try: client = twilio.rest.TwilioRestClient(self.account_sid, self.auth_token) message = client.messages.create(", "to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e: print e def send_message(self, to_number, message):", "def __init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token = auth_token self.phone_number =", "Notify(object): def __init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token = auth_token self.phone_number", "twilio.rest class Notify(object): def __init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token =", "= client.messages.create( body=message, to=to_number, from_=self.phone_number ) except twilio.TwilioRestException as e: print e def", "self.account_sid = account_sid self.auth_token = auth_token self.phone_number = phone_number def _request(self, to_number, message):", "class Notify(object): def __init__(self, account_sid, auth_token, phone_number): self.account_sid = account_sid self.auth_token = auth_token", "phone_number): self.account_sid = account_sid self.auth_token = auth_token self.phone_number = phone_number def _request(self, to_number,", "= account_sid self.auth_token = auth_token self.phone_number = phone_number def _request(self, to_number, message): try:" ]
[ "self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda:", "-> dict: return self._attributes async def _async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\"", "logging from collections import defaultdict import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity", "\"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not support repeaters at the moment \"\"\"", "if an error happend during toggling the switch ) self._attributes = defaultdict(str) super().__init__()", "self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the state from the FRITZ!Box\", exc_info=True)", "import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from", "from homeassistant.helpers.typing import HomeAssistantType from . 
import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL", "\"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon =", "entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if", "name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools):", "an error happend during toggling the switch ) self._attributes = defaultdict(str) super().__init__() @property", "- datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]:", "self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr", "= last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await", "fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not support repeaters", "be online initially self._is_available = ( True # set to False if an", "= self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds)", "datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\", 
\"external_ipv6\",", "moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon", "import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import", "def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property def available(self) ->", "_LOGGER.error(\"Error getting the state from the FRITZ!Box\", exc_info=True) self._is_available = False async def", "\"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools =", "\"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True", "\"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error", "self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"]", "= hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not support repeaters at", "True # set to False if an error happend during toggling the switch", "device_state_attributes(self) -> dict: return self._attributes async def _async_fetch_update(self): self._is_on = True try: if", "self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", 
\"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else: self._is_on", "class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\" def", "return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property def available(self) -> bool: return", "self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume the fritzbox to be", "= logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities", "as BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . import", "from . import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def", "not support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity):", "= True # We assume the fritzbox to be online initially self._is_available =", "self._is_on = True # We assume the fritzbox to be online initially self._is_available", "= ( True # set to False if an error happend during toggling", "fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume", "Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools", "-> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = 
hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\"", "self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property def", "in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except", "\"\"\"AVM Fritz!Box connectivitiy sensor\"\"\" import logging from collections import defaultdict import datetime try:", "FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self,", "DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry:", "getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the state from the FRITZ!Box\", exc_info=True) self._is_available", "from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing", "homeassistant.helpers.typing import HomeAssistantType from . 
import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL =", "We do not support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True", "self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True", "self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect", "in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on =", "await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the state from the FRITZ!Box\",", "SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) ->", "dict: return self._attributes async def _async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\" in", "connectivitiy sensor\"\"\" import logging from collections import defaultdict import datetime try: from homeassistant.components.binary_sensor", "is_on(self) -> bool: return self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self):", "exc_info=True) self._is_available = False async def async_update(self) -> None: _LOGGER.debug(\"Updating Connectivity sensor...\") await", "_LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do", "= await 
self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available", "HomeAssistantType from . import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async", "\"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds =", "def available(self) -> bool: return self._is_available @property def device_state_attributes(self) -> dict: return self._attributes", "datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr]", "ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import ConfigEntry from", "last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\",", "state from the FRITZ!Box\", exc_info=True) self._is_available = False async def async_update(self) -> None:", "to be online initially self._is_available = ( True # set to False if", "return self._attributes async def _async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services:", "= is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status =", "_async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: 
self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\",", "def device_state_attributes(self) -> dict: return self._attributes async def _async_fetch_update(self): self._is_on = True try:", "self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up", "@property def device_info(self): return self.fritzbox_tools.device_info @property def available(self) -> bool: return self._is_available @property", "= self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status,", "from the FRITZ!Box\", exc_info=True) self._is_available = False async def async_update(self) -> None: _LOGGER.debug(\"Updating", "== \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds", "except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import ConfigEntry", "True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up =", "async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up", "getting the state from the FRITZ!Box\", exc_info=True) self._is_available = False async def async_update(self)", "fritzbox to be online initially self._is_available = ( True # set to False", "assume the fritzbox to be online initially 
self._is_available = ( True # set", "= datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None:", "else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds = await", "<reponame>jloehr/ha-fritzbox-tools \"\"\"AVM Fritz!Box connectivitiy sensor\"\"\" import logging from collections import defaultdict import datetime", "True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class", "happend during toggling the switch ) self._attributes = defaultdict(str) super().__init__() @property def is_on(self)", "return self._is_available @property def device_state_attributes(self) -> dict: return self._attributes async def _async_fetch_update(self): self._is_on", "homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import", "hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not support repeaters at the", "self._is_available @property def device_state_attributes(self) -> dict: return self._attributes async def _async_fetch_update(self): self._is_on =", "is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus", "@property def device_state_attributes(self) -> dict: return self._attributes async def _async_fetch_update(self): self._is_on = True", "ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries", "return 
self.fritzbox_tools.device_info @property def available(self) -> bool: return self._is_available @property def device_state_attributes(self) ->", "attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr))", "\"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else: self._is_on =", "collections import defaultdict import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError:", "import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass:", "= datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\",", "ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from", "import HomeAssistantType from . import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60)", "await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for", "\"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\")", "from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . 
import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN", "True status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now()", "the FRITZ!Box\", exc_info=True) self._is_available = False async def async_update(self) -> None: _LOGGER.debug(\"Updating Connectivity", "\"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the", "]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the state", "ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\"", "self._is_available = False async def async_update(self) -> None: _LOGGER.debug(\"Updating Connectivity sensor...\") await self._async_fetch_update()", "the fritzbox to be online initially self._is_available = ( True # set to", "try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await", "datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting", "device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on", "defaultdict(str) super().__init__() @property def is_on(self) -> bool: return self._is_on @property def unique_id(self): return", "def is_on(self) -> bool: return self._is_on @property def unique_id(self): return 
f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def", "Fritz!Box connectivitiy sensor\"\"\" import logging from collections import defaultdict import datetime try: from", "connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up ==", "__init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We", "[ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception:", "None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We", "hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools =", "We assume the fritzbox to be online initially self._is_available = ( True #", "# We assume the fritzbox to be online initially self._is_available = ( True", "super().__init__() @property def is_on(self) -> bool: return self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\"", "-> bool: return self._is_available @property def device_state_attributes(self) -> dict: return self._attributes async def", "attr)) except Exception: _LOGGER.error(\"Error getting the state from the FRITZ!Box\", exc_info=True) self._is_available =", "during toggling the switch ) self._attributes = defaultdict(str) super().__init__() @property def is_on(self) ->", "homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . 
import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER", "if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not support repeaters at the moment", "up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not", "sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services: \"\"\" We do not support", "= \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools", "= \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on =", "-> bool: return self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return", "import logging from collections import defaultdict import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT,", "@property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property def available(self)", "True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class = \"connectivity\"", "ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume the fritzbox to be online initially", "= True status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect =", "from collections import defaultdict import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity 
except", "async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in", "device_info(self): return self.fritzbox_tools.device_info @property def available(self) -> bool: return self._is_available @property def device_state_attributes(self)", "@property def available(self) -> bool: return self._is_available @property def device_state_attributes(self) -> dict: return", "= \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id =", "return self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property", "self._is_on = is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status", "\"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on", "toggling the switch ) self._attributes = defaultdict(str) super().__init__() @property def is_on(self) -> bool:", "self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the state from", "self._is_on = True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"]", "= fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume the fritzbox", ") -> 
None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id] if \"WANIPConn1\" in fritzbox_tools.connection.services:", "status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() -", "try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice", "initially self._is_available = ( True # set to False if an error happend", "sensor\"\"\" import logging from collections import defaultdict import datetime try: from homeassistant.components.binary_sensor import", "datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT,", "bool: return self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info", "( True # set to False if an error happend during toggling the", "return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\" device_class =", "the switch ) self._attributes = defaultdict(str) super().__init__() @property def is_on(self) -> bool: return", "\"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting", "logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities )", "= ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume the fritzbox to be 
online", "True # We assume the fritzbox to be online initially self._is_available = (", "async def _async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection =", "await self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available =", "async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\" icon = \"mdi:router-wireless\"", "def device_info(self): return self.fritzbox_tools.device_info @property def available(self) -> bool: return self._is_available @property def", "DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType,", "the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box Connectivity\"", "BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from .", "if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection)", "last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda:", "def _async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda:", 
"homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity", "repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name =", "async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools", "self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected) self._is_available = True status = self.fritzbox_tools.fritzstatus uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\"))", "the state from the FRITZ!Box\", exc_info=True) self._is_available = False async def async_update(self) ->", "BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity from homeassistant.config_entries import", "self.fritzbox_tools.device_info @property def available(self) -> bool: return self._is_available @property def device_state_attributes(self) -> dict:", "_LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry,", "do not support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class", "BinarySensorEntity from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . 
import DATA_FRITZ_TOOLS_INSTANCE,", "in fritzbox_tools.connection.services: \"\"\" We do not support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)],", "f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property def available(self) -> bool: return self._is_available", "fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume the fritzbox to", "import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER =", "False if an error happend during toggling the switch ) self._attributes = defaultdict(str)", "Exception: _LOGGER.error(\"Error getting the state from the FRITZ!Box\", exc_info=True) self._is_available = False async", "= await self.hass.async_add_executor_job(lambda: getattr(status, attr)) except Exception: _LOGGER.error(\"Error getting the state from the", "lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else:", "self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True # We assume the", "switch ) self._attributes = defaultdict(str) super().__init__() @property def is_on(self) -> bool: return self._is_on", "def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id = ENTITY_ID_FORMAT.format(f\"fritzbox_{self.fritzbox_tools.fritzbox_model}_connectivity\") self._is_on = True #", "self._attributes = defaultdict(str) super().__init__() @property def is_on(self) -> bool: return self._is_on @property def", "import ENTITY_ID_FORMAT, BinarySensorDevice as BinarySensorEntity 
from homeassistant.config_entries import ConfigEntry from homeassistant.helpers.typing import HomeAssistantType", "HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\") fritzbox_tools = hass.data[DOMAIN][DATA_FRITZ_TOOLS_INSTANCE][entry.entry_id]", "uptime_seconds = await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] =", "= await self.hass.async_add_executor_job(lambda: getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat()", "available(self) -> bool: return self._is_available @property def device_state_attributes(self) -> dict: return self._attributes async", "online initially self._is_available = ( True # set to False if an error", ". import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = datetime.timedelta(seconds=60) async def async_setup_entry(", "unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property def device_info(self): return self.fritzbox_tools.device_info @property def available(self) -> bool:", "error happend during toggling the switch ) self._attributes = defaultdict(str) super().__init__() @property def", "\"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [", "self._is_available = ( True # set to False if an error happend during", "= lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\"", "= defaultdict(str) 
super().__init__() @property def is_on(self) -> bool: return self._is_on @property def unique_id(self):", "fritzbox_tools.connection.services: \"\"\" We do not support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True)", "defaultdict import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor", "icon = \"mdi:router-wireless\" device_class = \"connectivity\" def __init__(self, fritzbox_tools): self.fritzbox_tools = fritzbox_tools self.entity_id", ") self._attributes = defaultdict(str) super().__init__() @property def is_on(self) -> bool: return self._is_on @property", "@property def is_on(self) -> bool: return self._is_on @property def unique_id(self): return f\"{self.fritzbox_tools.unique_id}-{self.entity_id}\" @property", "ConfigEntry from homeassistant.helpers.typing import HomeAssistantType from . import DATA_FRITZ_TOOLS_INSTANCE, DOMAIN _LOGGER = logging.getLogger(__name__)", "self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] =", "for attr in [ \"modelname\", \"external_ip\", \"external_ipv6\", ]: self._attributes[attr] = await self.hass.async_add_executor_job(lambda: getattr(status,", "FRITZ!Box\", exc_info=True) self._is_available = False async def async_update(self) -> None: _LOGGER.debug(\"Updating Connectivity sensor...\")", "\"\"\" We do not support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return", "at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name = \"FRITZ!Box", "set to False if an error happend during toggling the switch ) self._attributes", "bool: return self._is_available @property def 
device_state_attributes(self) -> dict: return self._attributes async def _async_fetch_update(self):", "to False if an error happend during toggling the switch ) self._attributes =", "is_up = await self.hass.async_add_executor_job(connection) self._is_on = is_up == \"Up\" else: self._is_on = self.hass.async_add_executor_job(self.fritzbox_tools.fritzstatus.is_connected)", "self._attributes async def _async_fetch_update(self): self._is_on = True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection", "from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorDevice as", "except Exception: _LOGGER.error(\"Error getting the state from the FRITZ!Box\", exc_info=True) self._is_available = False", "def async_setup_entry( hass: HomeAssistantType, entry: ConfigEntry, async_add_entities ) -> None: _LOGGER.debug(\"Setting up sensors\")", "= True try: if \"WANCommonInterfaceConfig1\" in self.fritzbox_tools.connection.services: connection = lambda: self.fritzbox_tools.connection.call_action(\"WANCommonInterfaceConfig1\", \"GetCommonLinkProperties\")[\"NewPhysicalLinkStatus\"] is_up", "getattr(status, \"uptime\")) last_reconnect = datetime.datetime.now() - datetime.timedelta(seconds=uptime_seconds) self._attributes[\"last_reconnect\"] = last_reconnect.replace(microsecond=0).isoformat() for attr in", "# set to False if an error happend during toggling the switch )", "support repeaters at the moment \"\"\" async_add_entities([FritzBoxConnectivitySensor(fritzbox_tools)], True) return True class FritzBoxConnectivitySensor(BinarySensorEntity): name", "import defaultdict import datetime try: from homeassistant.components.binary_sensor import ENTITY_ID_FORMAT, BinarySensorEntity except ImportError: from" ]
[ "1, right - 1, i + 1 else: while i<n: res[i], left, i", "\"\"\" left, right, res = 0, n+1, [None]*n for i in range(n): if", "int :type k: int :rtype: List[int] \"\"\" left, right, res = 0, n+1,", "right, i = right - 1, right - 1, i + 1 else:", "left + 1, left + 1, i + 1 return res else: if", "- 1, i + 1 else: while i<n: res[i], left, i = left", "range(n): if k == 1: if i%2 == 0: while i<n: res[i], right,", "1 else: res[i], left = left + 1, left + 1 if i", "== 1: if i%2 == 0: while i<n: res[i], right, i = right", "+ 1 else: while i<n: res[i], left, i = left + 1, left", "if i%2 == 0: while i<n: res[i], right, i = right - 1,", "right = right - 1, right - 1 else: res[i], left = left", "if i%2 != 0: res[i], right = right - 1, right - 1", "i%2 == 0: while i<n: res[i], right, i = right - 1, right", "= right - 1, right - 1, i + 1 else: while i<n:", "- 1, right - 1, i + 1 else: while i<n: res[i], left,", "- 1 else: res[i], left = left + 1, left + 1 if", "res else: if i%2 != 0: res[i], right = right - 1, right", "= 0, n+1, [None]*n for i in range(n): if k == 1: if", "left + 1, left + 1 if i != 0: k -= 1", "res[i], left = left + 1, left + 1 if i != 0:", "right - 1 else: res[i], left = left + 1, left + 1", "[None]*n for i in range(n): if k == 1: if i%2 == 0:", "+ 1 return res else: if i%2 != 0: res[i], right = right", ":rtype: List[int] \"\"\" left, right, res = 0, n+1, [None]*n for i in", "res[i], left, i = left + 1, left + 1, i + 1", "while i<n: res[i], left, i = left + 1, left + 1, i", "else: if i%2 != 0: res[i], right = right - 1, right -", "k): \"\"\" :type n: int :type k: int :rtype: List[int] \"\"\" left, right,", "1, left + 1, i + 1 return res else: if i%2 !=", "0: while i<n: res[i], right, i = right - 1, right - 1,", "0: res[i], right = right - 1, right - 1 else: res[i], left", "i in range(n): if k == 1: if i%2 == 0: while i<n:", "right - 1, right - 1, i + 1 else: while i<n: res[i],", "return res else: if i%2 != 0: res[i], right = right - 1,", "1 else: while i<n: 
res[i], left, i = left + 1, left +", "= right - 1, right - 1 else: res[i], left = left +", "else: res[i], left = left + 1, left + 1 if i !=", "Solution: def constructArray(self, n, k): \"\"\" :type n: int :type k: int :rtype:", "i + 1 return res else: if i%2 != 0: res[i], right =", "0, n+1, [None]*n for i in range(n): if k == 1: if i%2", "1: if i%2 == 0: while i<n: res[i], right, i = right -", "class Solution: def constructArray(self, n, k): \"\"\" :type n: int :type k: int", "= left + 1, left + 1 if i != 0: k -=", "for i in range(n): if k == 1: if i%2 == 0: while", "in range(n): if k == 1: if i%2 == 0: while i<n: res[i],", "i<n: res[i], right, i = right - 1, right - 1, i +", "constructArray(self, n, k): \"\"\" :type n: int :type k: int :rtype: List[int] \"\"\"", "- 1, right - 1 else: res[i], left = left + 1, left", "+ 1, left + 1, i + 1 return res else: if i%2", "i + 1 else: while i<n: res[i], left, i = left + 1,", "def constructArray(self, n, k): \"\"\" :type n: int :type k: int :rtype: List[int]", "left + 1, i + 1 return res else: if i%2 != 0:", "1, i + 1 else: while i<n: res[i], left, i = left +", "else: while i<n: res[i], left, i = left + 1, left + 1,", "right - 1, i + 1 else: while i<n: res[i], left, i =", "left, right, res = 0, n+1, [None]*n for i in range(n): if k", "k: int :rtype: List[int] \"\"\" left, right, res = 0, n+1, [None]*n for", "1, right - 1 else: res[i], left = left + 1, left +", "int :rtype: List[int] \"\"\" left, right, res = 0, n+1, [None]*n for i", "+ 1, i + 1 return res else: if i%2 != 0: res[i],", "i = right - 1, right - 1, i + 1 else: while", "i%2 != 0: res[i], right = right - 1, right - 1 else:", "res = 0, n+1, [None]*n for i in range(n): if k == 1:", "n+1, [None]*n for i in range(n): if k == 1: if i%2 ==", "List[int] \"\"\" left, right, res = 0, n+1, [None]*n for i in range(n):", "= left + 1, left + 1, i + 1 return res else:", ":type n: int :type k: int :rtype: List[int] \"\"\" left, right, res =", "i = left + 1, left + 1, i + 1 return 
res", "n: int :type k: int :rtype: List[int] \"\"\" left, right, res = 0,", "res[i], right, i = right - 1, right - 1, i + 1", "i<n: res[i], left, i = left + 1, left + 1, i +", "res[i], right = right - 1, right - 1 else: res[i], left =", "\"\"\" :type n: int :type k: int :rtype: List[int] \"\"\" left, right, res", "1, i + 1 return res else: if i%2 != 0: res[i], right", "right, res = 0, n+1, [None]*n for i in range(n): if k ==", "left = left + 1, left + 1 if i != 0: k", "1 return res else: if i%2 != 0: res[i], right = right -", "while i<n: res[i], right, i = right - 1, right - 1, i", "== 0: while i<n: res[i], right, i = right - 1, right -", "right - 1, right - 1 else: res[i], left = left + 1,", "left, i = left + 1, left + 1, i + 1 return", "k == 1: if i%2 == 0: while i<n: res[i], right, i =", "if k == 1: if i%2 == 0: while i<n: res[i], right, i", "!= 0: res[i], right = right - 1, right - 1 else: res[i],", "n, k): \"\"\" :type n: int :type k: int :rtype: List[int] \"\"\" left,", ":type k: int :rtype: List[int] \"\"\" left, right, res = 0, n+1, [None]*n" ]
[ "in ['fc', 'conv'] self.layer_type = layer_type self.epsilon = 1e-6 self.dim = dim self.mem", "dimension, for conv layers, shape is the number of feature maps ''' assert", "name='beta') self.params = [self.gamma, self.beta] self.moving_mean = 0 self.moving_var = 1 def _train_fprop(self,", "sq = T.sqr(state_below) # b, ch, r, c = state_below.shape # extra_channels =", "T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\" # Adapted from pylearn2 #", "* var + (1-self.mem) * self.moving_var Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var", "for i in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] # # scale", "self.layer_type = layer_type self.epsilon = 1e-6 self.dim = dim self.mem = short_memory if", "is the number of feature maps ''' assert layer_type in ['fc', 'conv'] self.layer_type", "* x_t, the larger the short term memory, the more weight is put", "dim: for fc layers, shape is the layer dimension, for conv layers, shape", "the number of feature maps ''' assert layer_type in ['fc', 'conv'] self.layer_type =", "or conv epsilon: denominator min value for preventing division by zero in computing", "sq) # scale = self.k # # for i in xrange(self.n): # scale", "self.broadcastable = (True, False) elif self.layer_type == 'conv': input_shape = (1, dim, 1,", "(1-memory) * x_t, the larger the short term memory, the more weight is", "var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem * miu +", "miu = state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0) elif self.layer_type == 'conv':", "latest value, the moving average x_tp1 is calculated as x_tp1 = memory *", "('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\"", "= T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # # for i in xrange(self.n):", "miu)**2, axis=0) 
elif self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below", "put on contempory. layer_type: fc or conv epsilon: denominator min value for preventing", "self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean = 0 self.moving_var =", "self.n = n # self.alpha = alpha # self.beta = beta # self.k", "self.beta = beta # self.k = k # assert self.n % 2 ==", "gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z +", "('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\" # Adapted from pylearn2", "Covariate Shift PARAMS: short_memory: short term memory y_t is the latest value, the", "self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean self.moving_var = self.mem *", "beta def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean',", "('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\" # Adapted", "self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z", "# def _train_fprop(self, state_below): # half = self.n / 2 # sq =", "False) elif self.layer_type == 'conv': input_shape = (1, dim, 1, 1) self.broadcastable =", "denominator min value for preventing division by zero in computing std dim: for", "value, the moving average x_tp1 is calculated as x_tp1 = memory * y_t", "half = self.n / 2 # sq = T.sqr(state_below) # b, ch, r,", "pylearn2 # Local Response Normalization # \"\"\" # # def __init__(self, n=5, alpha=0.0001,", "axis=0) elif self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below -", 
"= shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean = 0 self.moving_var = 1", "more weight is put on contempory. layer_type: fc or conv epsilon: denominator min", "shape is the number of feature maps ''' assert layer_type in ['fc', 'conv']", "elif self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2,", "term memory y_t is the latest value, the moving average x_tp1 is calculated", "= self.mem * var + (1-self.mem) * self.moving_var Z = (state_below - self.moving_mean)", "theano class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization:", "layers, shape is the layer dimension, for conv layers, shape is the number", "self.moving_var = 1 def _train_fprop(self, state_below): if self.layer_type == 'fc': miu = state_below.mean(axis=0)", "n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n = n # self.alpha", "x_tp1 is calculated as x_tp1 = memory * y_t + (1-memory) * x_t,", "state_below): if self.layer_type == 'fc': miu = state_below.mean(axis=0) var = T.mean((state_below - miu)**2,", "memory, the more weight is put on contempory. 
layer_type: fc or conv epsilon:", "self.gamma = gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean", "* self.moving_mean self.moving_var = self.mem * var + (1-self.mem) * self.moving_var Z =", "self.mem = short_memory if self.layer_type == 'fc': input_shape = (1, dim) self.broadcastable =", "self.layer_type == 'conv': input_shape = (1, dim, 1, 1) self.broadcastable = (True, False,", "self.moving_var Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma,", "is calculated as x_tp1 = memory * y_t + (1-memory) * x_t, the", "self.moving_mean = 0 self.moving_var = 1 def _train_fprop(self, state_below): if self.layer_type == 'fc':", "True) self.gamma = gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta]", "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift PARAMS: short_memory:", "class LRN(Template): # \"\"\" # Adapted from pylearn2 # Local Response Normalization #", "layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] #", "= state_below.shape # extra_channels = T.alloc(0., b, ch + 2*half, r, c) #", "self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable)", "'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean", "input_shape = (1, dim, 1, 1) self.broadcastable = (True, False, True, True) self.gamma", "'fc': input_shape = (1, dim) self.broadcastable = (True, False) elif self.layer_type == 'conv':", "gamma_init(input_shape, name='gamma') 
self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean = 0", "miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean self.moving_var", "is put on contempory. layer_type: fc or conv epsilon: denominator min value for", "2 # sq = T.sqr(state_below) # b, ch, r, c = state_below.shape #", "scale ** self.beta # return state_below / scale # # def _test_fprop(self, state_below):", "Network Training by Reducing Internal Covariate Shift PARAMS: short_memory: short term memory y_t", "* self.moving_var Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma =", "= (1, dim, 1, 1) self.broadcastable = (True, False, True, True) self.gamma =", "# def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n =", "# self.alpha = alpha # self.beta = beta # self.k = k #", "self.beta] self.moving_mean = 0 self.moving_var = 1 def _train_fprop(self, state_below): if self.layer_type ==", "is supported' # # def _train_fprop(self, state_below): # half = self.n / 2", "* y_t + (1-memory) * x_t, the larger the short term memory, the", "Normalization # \"\"\" # # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN,", "= (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta", "self.k = k # assert self.n % 2 == 1, 'only odd n", "* sq[:,i:i+ch,:,:] # # scale = scale ** self.beta # return state_below /", "is the layer dimension, for conv layers, shape is the number of feature", "layer_type self.epsilon = 1e-6 self.dim = dim self.mem = short_memory if self.layer_type ==", "name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean = 0 self.moving_var", "# sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # # for i", "- miu)**2, axis=(0,2,3), keepdims=True) 
self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean", "shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight import theano.tensor as T import theano", "import UniformWeight import theano.tensor as T import theano class BatchNormalization(Template): def __init__(self, dim,", "# super(LRN, self).__init__() # self.n = n # self.alpha = alpha # self.beta", "c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # # for", "UniformWeight import theano.tensor as T import theano class BatchNormalization(Template): def __init__(self, dim, layer_type,", "= 1e-6 self.dim = dim self.mem = short_memory if self.layer_type == 'fc': input_shape", "* miu + (1-self.mem) * self.moving_mean self.moving_var = self.mem * var + (1-self.mem)", "input_shape = (1, dim) self.broadcastable = (True, False) elif self.layer_type == 'conv': input_shape", "1 def _train_fprop(self, state_below): if self.layer_type == 'fc': miu = state_below.mean(axis=0) var =", "sharedX, shared_ones from mozi.weight_init import UniformWeight import theano.tensor as T import theano class", "from pylearn2 # Local Response Normalization # \"\"\" # # def __init__(self, n=5,", "in computing std dim: for fc layers, shape is the layer dimension, for", "== 'fc': input_shape = (1, dim) self.broadcastable = (True, False) elif self.layer_type ==", "# # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n", "beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _test_fprop(self, state_below):", "layer dimension, for conv layers, shape is the number of feature maps '''", "+ beta def _test_fprop(self, state_below): Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var +", "self.broadcastable) return gamma * Z + beta def _layer_stats(self, state_below, layer_output): return [('moving_mean',", "memory * y_t + (1-memory) * x_t, the larger the short term memory,", "Template from 
mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight import theano.tensor", "mozi.weight_init import UniformWeight import theano.tensor as T import theano class BatchNormalization(Template): def __init__(self,", "T.sqr(state_below) # b, ch, r, c = state_below.shape # extra_channels = T.alloc(0., b,", "('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\" # Adapted from pylearn2 # Local", "n is supported' # # def _train_fprop(self, state_below): # half = self.n /", "# class LRN(Template): # \"\"\" # Adapted from pylearn2 # Local Response Normalization", "beta # self.k = k # assert self.n % 2 == 1, 'only", "\"\"\" # # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() #", "self.layer_type == 'fc': input_shape = (1, dim) self.broadcastable = (True, False) elif self.layer_type", "elif self.layer_type == 'conv': input_shape = (1, dim, 1, 1) self.broadcastable = (True,", "gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep Network Training by Reducing Internal", "mozi.layers.template import Template from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight", "Internal Covariate Shift PARAMS: short_memory: short term memory y_t is the latest value,", "var = T.mean((state_below - miu)**2, axis=0) elif self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3),", "== 'conv': input_shape = (1, dim, 1, 1) self.broadcastable = (True, False, True,", "state_below): # half = self.n / 2 # sq = T.sqr(state_below) # b,", "# # for i in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] #", "(1, dim, 1, 1) self.broadcastable = (True, False, True, True) self.gamma = gamma_init(input_shape,", "Adapted from pylearn2 # Local Response Normalization # \"\"\" # # def __init__(self,", "y_t + (1-memory) * x_t, the larger the short term memory, the more", "as x_tp1 = memory * y_t 
+ (1-memory) * x_t, the larger the", "def _test_fprop(self, state_below): Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma", "Local Response Normalization # \"\"\" # # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2):", "# # scale = scale ** self.beta # return state_below / scale #", "T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _layer_stats(self, state_below, layer_output): return", "self.epsilon = 1e-6 self.dim = dim self.mem = short_memory if self.layer_type == 'fc':", "+ (1-memory) * x_t, the larger the short term memory, the more weight", "True, True) self.gamma = gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma,", "dim) self.broadcastable = (True, False) elif self.layer_type == 'conv': input_shape = (1, dim,", "import theano class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch", "layers, shape is the number of feature maps ''' assert layer_type in ['fc',", "* Z + beta def _test_fprop(self, state_below): Z = (state_below - self.moving_mean) /", "short_memory: short term memory y_t is the latest value, the moving average x_tp1", "= beta # self.k = k # assert self.n % 2 == 1,", "state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))]", "import shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight import theano.tensor as T import", "T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma", "# scale = scale ** self.beta # return state_below / scale # #", "T.mean((state_below - miu)**2, axis=0) elif self.layer_type == 'conv': miu = 
state_below.mean(axis=(0,2,3), keepdims=True) var", "the larger the short term memory, the more weight is put on contempory.", "keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem * miu", "extra_channels = T.alloc(0., b, ch + 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:],", "short term memory, the more weight is put on contempory. layer_type: fc or", "T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): #", "miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean =", "(state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta =", "def _train_fprop(self, state_below): if self.layer_type == 'fc': miu = state_below.mean(axis=0) var = T.mean((state_below", "= T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta", "self.alpha = alpha # self.beta = beta # self.k = k # assert", "+ 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k", "BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep", "= k # assert self.n % 2 == 1, 'only odd n is", "alpha # self.beta = beta # self.k = k # assert self.n %", "* Z + beta def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)),", "+ self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma *", "self.dim = dim self.mem = short_memory if self.layer_type == 'fc': 
input_shape = (1,", "b, ch + 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale", "Accelerating Deep Network Training by Reducing Internal Covariate Shift PARAMS: short_memory: short term", "dim self.mem = short_memory if self.layer_type == 'fc': input_shape = (1, dim) self.broadcastable", "odd n is supported' # # def _train_fprop(self, state_below): # half = self.n", "conv epsilon: denominator min value for preventing division by zero in computing std", "self.mem * var + (1-self.mem) * self.moving_var Z = (state_below - self.moving_mean) /", "by Reducing Internal Covariate Shift PARAMS: short_memory: short term memory y_t is the", "(True, False, True, True) self.gamma = gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params", "scale = scale ** self.beta # return state_below / scale # # def", "preventing division by zero in computing std dim: for fc layers, shape is", "import Template from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight import", "c = state_below.shape # extra_channels = T.alloc(0., b, ch + 2*half, r, c)", "b, ch, r, c = state_below.shape # extra_channels = T.alloc(0., b, ch +", "= T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _test_fprop(self, state_below): Z", "def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep Network", "on contempory. 
layer_type: fc or conv epsilon: denominator min value for preventing division", "= T.alloc(0., b, ch + 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq)", "T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # # for i in xrange(self.n): #", "(1-self.mem) * self.moving_mean self.moving_var = self.mem * var + (1-self.mem) * self.moving_var Z", "+ beta def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)),", "T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\" #", "k=2): # super(LRN, self).__init__() # self.n = n # self.alpha = alpha #", "self.k # # for i in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:]", "as T import theano class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): '''", "+ (1-self.mem) * self.moving_var Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)", "T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _test_fprop(self, state_below): Z =", "weight is put on contempory. 
layer_type: fc or conv epsilon: denominator min value", "return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class", "= alpha # self.beta = beta # self.k = k # assert self.n", "- miu)**2, axis=0) elif self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var =", "T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template): # \"\"\" # Adapted from", "% 2 == 1, 'only odd n is supported' # # def _train_fprop(self,", "Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable)", "axis=(0,2,3), keepdims=True) self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean self.moving_var =", "self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _test_fprop(self,", "y_t is the latest value, the moving average x_tp1 is calculated as x_tp1", "= dim self.mem = short_memory if self.layer_type == 'fc': input_shape = (1, dim)", "= T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _layer_stats(self, state_below, layer_output):", "the moving average x_tp1 is calculated as x_tp1 = memory * y_t +", "= (True, False) elif self.layer_type == 'conv': input_shape = (1, dim, 1, 1)", "shape is the layer dimension, for conv layers, shape is the number of", "1, 'only odd n is supported' # # def _train_fprop(self, state_below): # half", "gamma * Z + beta def _test_fprop(self, state_below): Z = (state_below - self.moving_mean)", "LRN(Template): # \"\"\" # Adapted from pylearn2 # Local Response Normalization # \"\"\"", "the latest value, the moving average x_tp1 is calculated as x_tp1 = memory", "if self.layer_type == 'fc': miu = state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0)", "= self.k # # 
for i in xrange(self.n): # scale += self.alpha *", "self.beta # return state_below / scale # # def _test_fprop(self, state_below): # return", "keepdims=True) self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean self.moving_var = self.mem", "theano.tensor as T import theano class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01):", "alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n = n # self.alpha =", "# sq = T.sqr(state_below) # b, ch, r, c = state_below.shape # extra_channels", "= gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean =", "short_memory if self.layer_type == 'fc': input_shape = (1, dim) self.broadcastable = (True, False)", "Response Normalization # \"\"\" # # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): #", "T.alloc(0., b, ch + 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) #", "assert layer_type in ['fc', 'conv'] self.layer_type = layer_type self.epsilon = 1e-6 self.dim =", "+= self.alpha * sq[:,i:i+ch,:,:] # # scale = scale ** self.beta # return", "x_tp1 = memory * y_t + (1-memory) * x_t, the larger the short", "= scale ** self.beta # return state_below / scale # # def _test_fprop(self,", "/ T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return", "= state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem", "shared_zeros(input_shape, name='beta') self.params = [self.gamma, self.beta] self.moving_mean = 0 self.moving_var = 1 def", "term memory, the more weight is put on contempory. 
layer_type: fc or conv", "''' REFERENCE: Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift", "state_below.shape # extra_channels = T.alloc(0., b, ch + 2*half, r, c) # sq", "the short term memory, the more weight is put on contempory. layer_type: fc", "# assert self.n % 2 == 1, 'only odd n is supported' #", "number of feature maps ''' assert layer_type in ['fc', 'conv'] self.layer_type = layer_type", "(True, False) elif self.layer_type == 'conv': input_shape = (1, dim, 1, 1) self.broadcastable", "= layer_type self.epsilon = 1e-6 self.dim = dim self.mem = short_memory if self.layer_type", "= T.mean((state_below - miu)**2, axis=0) elif self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True)", "1) self.broadcastable = (True, False, True, True) self.gamma = gamma_init(input_shape, name='gamma') self.beta =", "_train_fprop(self, state_below): if self.layer_type == 'fc': miu = state_below.mean(axis=0) var = T.mean((state_below -", "(1, dim) self.broadcastable = (True, False) elif self.layer_type == 'conv': input_shape = (1,", "'conv'] self.layer_type = layer_type self.epsilon = 1e-6 self.dim = dim self.mem = short_memory", "= T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem * miu + (1-self.mem)", "= T.sqr(state_below) # b, ch, r, c = state_below.shape # extra_channels = T.alloc(0.,", "dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep Network Training by", "zero in computing std dim: for fc layers, shape is the layer dimension,", "maps ''' assert layer_type in ['fc', 'conv'] self.layer_type = layer_type self.epsilon = 1e-6", "beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def _layer_stats(self, state_below,", "Deep Network Training by Reducing Internal Covariate Shift PARAMS: short_memory: short term memory", "moving average x_tp1 is calculated as x_tp1 = memory * y_t 
+ (1-memory)", "value for preventing division by zero in computing std dim: for fc layers,", "state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem *", "# scale = self.k # # for i in xrange(self.n): # scale +=", "import theano.tensor as T import theano class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(),", "\"\"\" # Adapted from pylearn2 # Local Response Normalization # \"\"\" # #", "_train_fprop(self, state_below): # half = self.n / 2 # sq = T.sqr(state_below) #", "contempory. layer_type: fc or conv epsilon: denominator min value for preventing division by", "self.n / 2 # sq = T.sqr(state_below) # b, ch, r, c =", "''' assert layer_type in ['fc', 'conv'] self.layer_type = layer_type self.epsilon = 1e-6 self.dim", "2 == 1, 'only odd n is supported' # # def _train_fprop(self, state_below):", "= short_memory if self.layer_type == 'fc': input_shape = (1, dim) self.broadcastable = (True,", "self.broadcastable) return gamma * Z + beta def _test_fprop(self, state_below): Z = (state_below", "short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate", "shared_ones from mozi.weight_init import UniformWeight import theano.tensor as T import theano class BatchNormalization(Template):", "- self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta,", "if self.layer_type == 'fc': input_shape = (1, dim) self.broadcastable = (True, False) elif", "<gh_stars>100-1000 from mozi.layers.template import Template from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init", "var + (1-self.mem) * self.moving_var Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var +", "self.broadcastable = (True, False, True, True) self.gamma = gamma_init(input_shape, 
name='gamma') self.beta = shared_zeros(input_shape,", "= [self.gamma, self.beta] self.moving_mean = 0 self.moving_var = 1 def _train_fprop(self, state_below): if", "# Adapted from pylearn2 # Local Response Normalization # \"\"\" # # def", "class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating", "computing std dim: for fc layers, shape is the layer dimension, for conv", "ch + 2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale =", "for conv layers, shape is the number of feature maps ''' assert layer_type", "short term memory y_t is the latest value, the moving average x_tp1 is", "miu + (1-self.mem) * self.moving_mean self.moving_var = self.mem * var + (1-self.mem) *", "1, 1) self.broadcastable = (True, False, True, True) self.gamma = gamma_init(input_shape, name='gamma') self.beta", "mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight import theano.tensor as T", "is the latest value, the moving average x_tp1 is calculated as x_tp1 =", "REFERENCE: Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift PARAMS:", "from mozi.weight_init import UniformWeight import theano.tensor as T import theano class BatchNormalization(Template): def", "memory y_t is the latest value, the moving average x_tp1 is calculated as", "self.alpha * sq[:,i:i+ch,:,:] # # scale = scale ** self.beta # return state_below", "(1-self.mem) * self.moving_var Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma", "division by zero in computing std dim: for fc layers, shape is the", "from mozi.layers.template import Template from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init import", "layer_type: fc or conv epsilon: denominator min value for preventing division by zero", "PARAMS: 
short_memory: short term memory y_t is the latest value, the moving average", "T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True) self.moving_mean = self.mem * miu + (1-self.mem) *", "def _train_fprop(self, state_below): # half = self.n / 2 # sq = T.sqr(state_below)", "== 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True)", "n # self.alpha = alpha # self.beta = beta # self.k = k", "by zero in computing std dim: for fc layers, shape is the layer", "state_below): Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma = T.patternbroadcast(self.gamma,", "= (True, False, True, True) self.gamma = gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta')", "fc or conv epsilon: denominator min value for preventing division by zero in", "i in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] # # scale =", "calculated as x_tp1 = memory * y_t + (1-memory) * x_t, the larger", "# b, ch, r, c = state_below.shape # extra_channels = T.alloc(0., b, ch", "[('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max', T.max(self.gamma))] # class LRN(Template):", "False, True, True) self.gamma = gamma_init(input_shape, name='gamma') self.beta = shared_zeros(input_shape, name='beta') self.params =", "['fc', 'conv'] self.layer_type = layer_type self.epsilon = 1e-6 self.dim = dim self.mem =", "/ 2 # sq = T.sqr(state_below) # b, ch, r, c = state_below.shape", "self.moving_var = self.mem * var + (1-self.mem) * self.moving_var Z = (state_below -", "sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # # for i in", "self).__init__() # self.n = n # self.alpha = alpha # self.beta = beta", "def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', 
T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)),", "r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k # #", "_test_fprop(self, state_below): Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon) gamma =", "k # assert self.n % 2 == 1, 'only odd n is supported'", "2*half, r, c) # sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq) # scale = self.k #", "# \"\"\" # Adapted from pylearn2 # Local Response Normalization # \"\"\" #", "self.n % 2 == 1, 'only odd n is supported' # # def", "scale += self.alpha * sq[:,i:i+ch,:,:] # # scale = scale ** self.beta #", "= self.mem * miu + (1-self.mem) * self.moving_mean self.moving_var = self.mem * var", "self.mem * miu + (1-self.mem) * self.moving_mean self.moving_var = self.mem * var +", "self.layer_type == 'fc': miu = state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0) elif", "Shift PARAMS: short_memory: short term memory y_t is the latest value, the moving", "== 'fc': miu = state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0) elif self.layer_type", "= (1, dim) self.broadcastable = (True, False) elif self.layer_type == 'conv': input_shape =", "# self.n = n # self.alpha = alpha # self.beta = beta #", "# for i in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] # #", "layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep Network Training by Reducing", "sq[:,i:i+ch,:,:] # # scale = scale ** self.beta # return state_below / scale", "# half = self.n / 2 # sq = T.sqr(state_below) # b, ch,", "gamma * Z + beta def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std',", "layer_type in ['fc', 'conv'] self.layer_type = layer_type self.epsilon = 1e-6 self.dim = dim", "the layer dimension, for conv layers, shape is the number of feature maps", "'only odd n is 
supported' # # def _train_fprop(self, state_below): # half =", "# \"\"\" # # def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__()", "conv layers, shape is the number of feature maps ''' assert layer_type in", "feature maps ''' assert layer_type in ['fc', 'conv'] self.layer_type = layer_type self.epsilon =", "beta def _test_fprop(self, state_below): Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)", "T.max(self.gamma))] # class LRN(Template): # \"\"\" # Adapted from pylearn2 # Local Response", "Reducing Internal Covariate Shift PARAMS: short_memory: short term memory y_t is the latest", "fc layers, shape is the layer dimension, for conv layers, shape is the", "from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones from mozi.weight_init import UniformWeight import theano.tensor as", "= self.n / 2 # sq = T.sqr(state_below) # b, ch, r, c", "= n # self.alpha = alpha # self.beta = beta # self.k =", "__init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE: Batch Normalization: Accelerating Deep Network Training", "in xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] # # scale = scale", "== 1, 'only odd n is supported' # # def _train_fprop(self, state_below): #", "# Local Response Normalization # \"\"\" # # def __init__(self, n=5, alpha=0.0001, beta=0.75,", "supported' # # def _train_fprop(self, state_below): # half = self.n / 2 #", "_layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean', T.mean(self.gamma)), ('beta_mean', T.mean(self.beta)), ('gamma_max',", "T import theano class BatchNormalization(Template): def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01): ''' REFERENCE:", "ch, r, c = state_below.shape # extra_channels = T.alloc(0., b, ch + 2*half,", "self.broadcastable) beta = T.patternbroadcast(self.beta, 
self.broadcastable) return gamma * Z + beta def _layer_stats(self,", "return gamma * Z + beta def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)),", "# # def _train_fprop(self, state_below): # half = self.n / 2 # sq", "dim, 1, 1) self.broadcastable = (True, False, True, True) self.gamma = gamma_init(input_shape, name='gamma')", "__init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n = n #", "# self.beta = beta # self.k = k # assert self.n % 2", "for fc layers, shape is the layer dimension, for conv layers, shape is", "0 self.moving_var = 1 def _train_fprop(self, state_below): if self.layer_type == 'fc': miu =", "= state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0) elif self.layer_type == 'conv': miu", "= 1 def _train_fprop(self, state_below): if self.layer_type == 'fc': miu = state_below.mean(axis=0) var", "T.patternbroadcast(self.gamma, self.broadcastable) beta = T.patternbroadcast(self.beta, self.broadcastable) return gamma * Z + beta def", "of feature maps ''' assert layer_type in ['fc', 'conv'] self.layer_type = layer_type self.epsilon", "'conv': input_shape = (1, dim, 1, 1) self.broadcastable = (True, False, True, True)", "+ (1-self.mem) * self.moving_mean self.moving_var = self.mem * var + (1-self.mem) * self.moving_var", "assert self.n % 2 == 1, 'only odd n is supported' # #", "= memory * y_t + (1-memory) * x_t, the larger the short term", "** self.beta # return state_below / scale # # def _test_fprop(self, state_below): #", "state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0) elif self.layer_type == 'conv': miu =", "def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2): # super(LRN, self).__init__() # self.n = n", "# self.k = k # assert self.n % 2 == 1, 'only odd", "scale = self.k # # for i in xrange(self.n): # scale += self.alpha", "'fc': miu = state_below.mean(axis=0) var = T.mean((state_below - miu)**2, axis=0) elif self.layer_type 
==", "super(LRN, self).__init__() # self.n = n # self.alpha = alpha # self.beta =", "1e-6 self.dim = dim self.mem = short_memory if self.layer_type == 'fc': input_shape =", "the more weight is put on contempory. layer_type: fc or conv epsilon: denominator", "= 0 self.moving_var = 1 def _train_fprop(self, state_below): if self.layer_type == 'fc': miu", "epsilon: denominator min value for preventing division by zero in computing std dim:", "self.params = [self.gamma, self.beta] self.moving_mean = 0 self.moving_var = 1 def _train_fprop(self, state_below):", "# scale += self.alpha * sq[:,i:i+ch,:,:] # # scale = scale ** self.beta", "xrange(self.n): # scale += self.alpha * sq[:,i:i+ch,:,:] # # scale = scale **", "x_t, the larger the short term memory, the more weight is put on", "average x_tp1 is calculated as x_tp1 = memory * y_t + (1-memory) *", "for preventing division by zero in computing std dim: for fc layers, shape", "Training by Reducing Internal Covariate Shift PARAMS: short_memory: short term memory y_t is", "self.layer_type == 'conv': miu = state_below.mean(axis=(0,2,3), keepdims=True) var = T.mean((state_below - miu)**2, axis=(0,2,3),", "Z + beta def _layer_stats(self, state_below, layer_output): return [('moving_mean', T.mean(self.moving_mean)), ('moving_std', T.mean(self.moving_var)), ('gamma_mean',", "std dim: for fc layers, shape is the layer dimension, for conv layers,", "self.moving_mean self.moving_var = self.mem * var + (1-self.mem) * self.moving_var Z = (state_below", "# return state_below / scale # # def _test_fprop(self, state_below): # return self._train_fprop(state_below)", "Z + beta def _test_fprop(self, state_below): Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var", "Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift PARAMS: short_memory: short", "beta=0.75, k=2): # super(LRN, self).__init__() # self.n = n # self.alpha = alpha", "return gamma * Z + beta def _test_fprop(self, state_below): Z = 
(state_below -", "[self.gamma, self.beta] self.moving_mean = 0 self.moving_var = 1 def _train_fprop(self, state_below): if self.layer_type", "# extra_channels = T.alloc(0., b, ch + 2*half, r, c) # sq =", "r, c = state_below.shape # extra_channels = T.alloc(0., b, ch + 2*half, r,", "min value for preventing division by zero in computing std dim: for fc", "larger the short term memory, the more weight is put on contempory. layer_type:" ]
[ "len(row) grad = [0.0 for _ in range(m + 1)] diff = compute_y(model,", "fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row) for row in fold] test_pred =", "\"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\" + str(i) for i in", "in range(m): y += row[i] * model[i] return y + model[m] def compute_grad_mse(model,", "/ float(n) return expectation, sd ** 0.5 filepath = \"features_var_1.csv\" cv_k = 5", "in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models,", "= [int(size / cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k - 1] = size", "for _ in range(m + 1)] diff = compute_y(model, row) - actual for", "actual for i in range(m): grad[i] += 2.0 * row[i] * diff grad[m]", "fold] fold = [r[0:-1] for r in fold] test_actual = [r[-1] for r", "in range(len(fold)): row = fold[j] actual = actuals[j] grad = compute_grad_mse(model, row, actual)", "= 50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test,", "sklearn import preprocessing from plotly import graph_objects def import_data(path): return [[float(f) for f", "= \"features_var_1.csv\" cv_k = 5 learn_rate = 0.01 iterations = 50 dataset =", "dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test, r2_test =", "normalized = scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits =", "sd += ((d - expectation) ** 2) / float(n) return expectation, sd **", "- sum(splits[0:cross_validation_k-1]) sets = list() offset = 0 for s in splits: sets.append([dataset[i]", "rmses_test, r2s_test def compute_stat(data): n = len(data) expectation 
= 0.0 for d in", "= cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr", "normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate,", "in range(len(actual)): expect += actual[i] / float(len(actual)) for i in range(len(actual)): denominator +=", "j in range(len(fold)): row = fold[j] actual = actuals[j] grad = compute_grad_mse(model, row,", "fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s, rmses_test,", "model def compute_rmse(prediction, actual): mse = 0.0 n = len(prediction) for i in", "\"R2 (test)\"] + [\"f\" + str(i) for i in range(len(dataset[0]))] ] + \\", "sd ** 0.5 filepath = \"features_var_1.csv\" cv_k = 5 learn_rate = 0.01 iterations", "s)]) offset += s return sets def compute_y(model, row): m = len(row) y", "rmse_test, r2_test] + models_tr] values = [\"X\"] + [\"Fold\" + str(i) for i", "[] rmses = [] r2s = [] rmses_test = [] r2s_test = []", "range(m): y += row[i] * model[i] return y + model[m] def compute_grad_mse(model, row,", "run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for i in range(cv_k)] for j in", "for j in range(len(dataset[0]))] stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test,", "d / float(n) sd = 0.0 for d in data: sd += ((d", "[ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ] +", "rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for i in range(cv_k)]", "row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) 
r2s_test.append(compute_r2(test_pred, test_actual))", "+ \\ [ [stats[j][i] for j in range(len(stats))] for i in range(2) ]", "actuals[j] grad = compute_grad_mse(model, row, actual) model = [model[k] - learning_rate / (1", "model = [0.0 for _ in range(m + 1)] for i in range(iterations_count):", "in splits: sets.append([dataset[i] for i in range(offset, offset + s)]) offset += s", "i in range(cv_k) ] + \\ [ [stats[j][i] for j in range(len(stats))] for", "for r in fold] fold = [r[0:-1] for r in fold] test_actual =", "/ float(n) return mse ** 0.5 def compute_r2(prediction, actual): nominator = 0.0 denominator", "- actual for i in range(m): grad[i] += 2.0 * row[i] * diff", "expectation, sd ** 0.5 filepath = \"features_var_1.csv\" cv_k = 5 learn_rate = 0.01", "return models, rmses, r2s, rmses_test, r2s_test def compute_stat(data): n = len(data) expectation =", "test] test = [r[0:-1] for r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate,", "test = [r[0:-1] for r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count)", "test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s, rmses_test, r2s_test def compute_stat(data): n", "range(m): grad[i] += 2.0 * row[i] * diff grad[m] += 2.0 * diff", "] + \\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in", "str(i) for i in range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2", "csv import reader from sklearn import preprocessing from plotly import graph_objects def import_data(path):", "[]) test = set fold_actual = [r[-1] for r in fold] fold =", "learn_rate, iterations) models_tr = [[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))]", "i in range(iterations_count): for j in range(len(fold)): row = fold[j] actual = actuals[j]", "0 for s in splits: sets.append([dataset[i] for i in range(offset, offset + s)])", "len(dataset) splits = 
[int(size / cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k - 1]", "models.append(model) return models, rmses, r2s, rmses_test, r2s_test def compute_stat(data): n = len(data) expectation", "[] for set in sets: fold = list(sets) fold.remove(set) fold = sum(fold, [])", "def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits = [int(size / cross_validation_k) for _", "i in range(cv_k)] for j in range(len(dataset[0]))] stats = [compute_stat(data) for data in", "0.5 filepath = \"features_var_1.csv\" cv_k = 5 learn_rate = 0.01 iterations = 50", "= len(dataset) splits = [int(size / cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k -", "* model[i] return y + model[m] def compute_grad_mse(model, row, actual): m = len(row)", "for r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset)", "data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr] values = [\"X\"] + [\"Fold\"", "return expectation, sd ** 0.5 filepath = \"features_var_1.csv\" cv_k = 5 learn_rate =", "import_data(path): return [[float(f) for f in r] for r in reader(open(path, \"r\"))] def", "[0.0 for _ in range(m + 1)] for i in range(iterations_count): for j", "(actual[i] - prediction[i]) ** 2 for i in range(len(actual)): expect += actual[i] /", "** 2) / float(n) return mse ** 0.5 def compute_r2(prediction, actual): nominator =", "row, actual) model = [model[k] - learning_rate / (1 + i) * grad[k]", "preprocessing from plotly import graph_objects def import_data(path): return [[float(f) for f in r]", "/ float(n) sd = 0.0 for d in data: sd += ((d -", "[r[-1] for r in fold] fold = [r[0:-1] for r in fold] test_actual", "** 0.5 filepath = \"features_var_1.csv\" cv_k = 5 learn_rate = 0.01 iterations =", "for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) 
rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model)", "range(len(actual)): expect += actual[i] / float(len(actual)) for i in range(len(actual)): denominator += (actual[i]", "+ str(i) for i in range(len(dataset[0]))] ] + \\ [ [rmse_train[i], r2_train[i], rmse_test[i],", "run_learning(sets, learning_rate, iterations_count): models = [] rmses = [] r2s = [] rmses_test", "sets def compute_y(model, row): m = len(row) y = 0.0 for i in", "mse = 0.0 n = len(prediction) for i in range(n): mse += ((prediction[i]", "rmses, r2s, rmses_test, r2s_test def compute_stat(data): n = len(data) expectation = 0.0 for", "= [] r2s_test = [] for set in sets: fold = list(sets) fold.remove(set)", "= [0.0 for _ in range(m + 1)] for i in range(iterations_count): for", "range(cv_k) ] + \\ [ [stats[j][i] for j in range(len(stats))] for i in", "compute_rmse(prediction, actual): mse = 0.0 n = len(prediction) for i in range(n): mse", "for set in sets: fold = list(sets) fold.remove(set) fold = sum(fold, []) test", "- float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count): models = [] rmses =", "= scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits = [int(size", "for i in range(len(actual)): nominator += (actual[i] - prediction[i]) ** 2 for i", "* diff grad[m] += 2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate,", "learning_rate, iterations_count) fold_pred = [compute_y(model, row) for row in fold] test_pred = [compute_y(model,", "= actuals[j] grad = compute_grad_mse(model, row, actual) model = [model[k] - learning_rate /", "fold.remove(set) fold = sum(fold, []) test = set fold_actual = [r[-1] for r", "m = len(row) grad = [0.0 for _ in range(m + 1)] diff", "+= 2.0 * row[i] * diff grad[m] += 2.0 * diff return grad", "graph_objects def import_data(path): 
return [[float(f) for f in r] for r in reader(open(path,", "= preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset)", "_ in range(cross_validation_k)] splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1]) sets = list()", "test_actual)) models.append(model) return models, rmses, r2s, rmses_test, r2s_test def compute_stat(data): n = len(data)", "stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr] values", "in range(len(dataset[0]))] ] + \\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for", "/ cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1])", "** 2 for i in range(len(actual)): expect += actual[i] / float(len(actual)) for i", "learn_rate = 0.01 iterations = 50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k)", "for i in range(len(dataset[0]))] ] + \\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] +", "test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row) for row", "range(m + 1)] return model def compute_rmse(prediction, actual): mse = 0.0 n =", "offset += s return sets def compute_y(model, row): m = len(row) y =", "+ 1)] diff = compute_y(model, row) - actual for i in range(m): grad[i]", "= [0.0 for _ in range(m + 1)] diff = compute_y(model, row) -", "mse += ((prediction[i] - actual[i]) ** 2) / float(n) return mse ** 0.5", "row[i] * diff grad[m] += 2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals,", "= [compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual))", 
"len(row) y = 0.0 for i in range(m): y += row[i] * model[i]", "fold[j] actual = actuals[j] grad = compute_grad_mse(model, row, actual) model = [model[k] -", "len(prediction) for i in range(n): mse += ((prediction[i] - actual[i]) ** 2) /", "0.0 denominator = 0.0 expect = 0.0 for i in range(len(actual)): nominator +=", "iterations) models_tr = [[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))] stats", "rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s, rmses_test, r2s_test def compute_stat(data):", "[int(size / cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k - 1] = size -", "expect) ** 2 return 1 - float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count):", "= list(sets) fold.remove(set) fold = sum(fold, []) test = set fold_actual = [r[-1]", "r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for i in", "+ \\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k)", "= size - sum(splits[0:cross_validation_k-1]) sets = list() offset = 0 for s in", "splits: sets.append([dataset[i] for i in range(offset, offset + s)]) offset += s return", "r2s, rmses_test, r2s_test def compute_stat(data): n = len(data) expectation = 0.0 for d", "(1 + i) * grad[k] for k in range(m + 1)] return model", "i in range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE", "compute_grad_mse(model, row, actual) model = [model[k] - learning_rate / (1 + i) *", "y = 0.0 for i in range(m): y += row[i] * model[i] return", "- expectation) ** 2) / float(n) return expectation, sd ** 0.5 filepath =", "* row[i] * diff grad[m] += 2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold,", "0.0 n = len(prediction) for i in range(n): mse += ((prediction[i] - actual[i])", "= len(row) grad = [0.0 for 
_ in range(m + 1)] diff =", "row in fold] test_pred = [compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual))", "for j in range(len(fold)): row = fold[j] actual = actuals[j] grad = compute_grad_mse(model,", "model[m] def compute_grad_mse(model, row, actual): m = len(row) grad = [0.0 for _", "row) - actual for i in range(m): grad[i] += 2.0 * row[i] *", "[\"f\" + str(i) for i in range(len(dataset[0]))] ] + \\ [ [rmse_train[i], r2_train[i],", "list(sets) fold.remove(set) fold = sum(fold, []) test = set fold_actual = [r[-1] for", "r2_test[i]] + models[i] for i in range(cv_k) ] + \\ [ [stats[j][i] for", "compute_grad_mse(model, row, actual): m = len(row) grad = [0.0 for _ in range(m", "for f in r] for r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler =", "models, rmses, r2s, rmses_test, r2s_test def compute_stat(data): n = len(data) expectation = 0.0", "actuals, learning_rate, iterations_count): m = len(fold[0]) model = [0.0 for _ in range(m", "return sets def compute_y(model, row): m = len(row) y = 0.0 for i", "sets.append([dataset[i] for i in range(offset, offset + s)]) offset += s return sets", "r2s_test = [] for set in sets: fold = list(sets) fold.remove(set) fold =", "j in range(len(stats))] for i in range(2) ] table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells)) figure", "[0.0 for _ in range(m + 1)] diff = compute_y(model, row) - actual", "k in range(m + 1)] return model def compute_rmse(prediction, actual): mse = 0.0", "[[float(f) for f in r] for r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler", "= len(fold[0]) model = [0.0 for _ in range(m + 1)] for i", "denominator = 0.0 expect = 0.0 for i in range(len(actual)): nominator += (actual[i]", "learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m = len(fold[0]) model = [0.0 for _ in", "for i in range(iterations_count): for j in range(len(fold)): row = fold[j] actual =", "expect += 
actual[i] / float(len(actual)) for i in range(len(actual)): denominator += (actual[i] -", "= [] rmses_test = [] r2s_test = [] for set in sets: fold", "= [r[0:-1] for r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred", "cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits = [int(size / cross_validation_k) for _ in", "+= d / float(n) sd = 0.0 for d in data: sd +=", "r in fold] test_actual = [r[-1] for r in test] test = [r[0:-1]", "= 0.0 for d in data: sd += ((d - expectation) ** 2)", "in data: sd += ((d - expectation) ** 2) / float(n) return expectation,", "+= row[i] * model[i] return y + model[m] def compute_grad_mse(model, row, actual): m", "test_actual = [r[-1] for r in test] test = [r[0:-1] for r in", "fold_actual = [r[-1] for r in fold] fold = [r[0:-1] for r in", "in range(cross_validation_k)] splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1]) sets = list() offset", "offset = 0 for s in splits: sets.append([dataset[i] for i in range(offset, offset", "actual[i]) ** 2) / float(n) return mse ** 0.5 def compute_r2(prediction, actual): nominator", "= [model[k] - learning_rate / (1 + i) * grad[k] for k in", "0.0 for i in range(m): y += row[i] * model[i] return y +", "for i in range(offset, offset + s)]) offset += s return sets def", "range(len(dataset[0]))] stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr]", "values = [\"X\"] + [\"Fold\" + str(i) for i in range(cv_k)] + [\"E\",\"SD\"]", "list() offset = 0 for s in splits: sets.append([dataset[i] for i in range(offset,", "* grad[k] for k in range(m + 1)] return model def compute_rmse(prediction, actual):", "sd = 0.0 for d in data: sd += ((d - expectation) **", "0.01 iterations = 50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models, rmse_train,", "in test] model = learn_stochastic_gradient_decent_mse(fold, 
fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row) for", "size = len(dataset) splits = [int(size / cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k", "model = [model[k] - learning_rate / (1 + i) * grad[k] for k", "= [compute_y(model, row) for row in fold] test_pred = [compute_y(model, row) for row", "+= ((d - expectation) ** 2) / float(n) return expectation, sd ** 0.5", "for i in range(m): y += row[i] * model[i] return y + model[m]", "r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s, rmses_test, r2s_test", "+ s)]) offset += s return sets def compute_y(model, row): m = len(row)", "for i in range(len(actual)): expect += actual[i] / float(len(actual)) for i in range(len(actual)):", "range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", \"R2", "n = len(prediction) for i in range(n): mse += ((prediction[i] - actual[i]) **", "row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return", "[r[0:-1] for r in fold] test_actual = [r[-1] for r in test] test", "def compute_r2(prediction, actual): nominator = 0.0 denominator = 0.0 expect = 0.0 for", "test_pred = [compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred,", "for i in range(cv_k)] for j in range(len(dataset[0]))] stats = [compute_stat(data) for data", "for _ in range(cross_validation_k)] splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1]) sets =", "grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, 
iterations_count): m = len(fold[0]) model = [0.0 for", "grad[k] for k in range(m + 1)] return model def compute_rmse(prediction, actual): mse", "= [r[-1] for r in fold] fold = [r[0:-1] for r in fold]", "+ models[i] for i in range(cv_k) ] + \\ [ [stats[j][i] for j", "in range(m + 1)] return model def compute_rmse(prediction, actual): mse = 0.0 n", "splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1]) sets = list() offset = 0", "iterations = 50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models, rmse_train, r2_train,", "expectation = 0.0 for d in data: expectation += d / float(n) sd", "sets: fold = list(sets) fold.remove(set) fold = sum(fold, []) test = set fold_actual", "r in fold] fold = [r[0:-1] for r in fold] test_actual = [r[-1]", "range(len(fold)): row = fold[j] actual = actuals[j] grad = compute_grad_mse(model, row, actual) model", "import reader from sklearn import preprocessing from plotly import graph_objects def import_data(path): return", "data: sd += ((d - expectation) ** 2) / float(n) return expectation, sd", "+ str(i) for i in range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE (train)\",", "mse ** 0.5 def compute_r2(prediction, actual): nominator = 0.0 denominator = 0.0 expect", "2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m = len(fold[0])", "\\ [ [stats[j][i] for j in range(len(stats))] for i in range(2) ] table", "actual) model = [model[k] - learning_rate / (1 + i) * grad[k] for", "import preprocessing from plotly import graph_objects def import_data(path): return [[float(f) for f in", "actual[i] / float(len(actual)) for i in range(len(actual)): denominator += (actual[i] - expect) **", "for s in splits: sets.append([dataset[i] for i in range(offset, offset + s)]) offset", "actual): mse = 0.0 n = len(prediction) for i in range(n): mse +=", "[\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", 
\"R2 (test)\"] +", "r2_test = run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for i in range(cv_k)] for", "[[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))] stats = [compute_stat(data) for", "= [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr] values =", "(actual[i] - expect) ** 2 return 1 - float(nominator) / float(denominator) def run_learning(sets,", "= [] r2s = [] rmses_test = [] r2s_test = [] for set", "compute_r2(prediction, actual): nominator = 0.0 denominator = 0.0 expect = 0.0 for i", "for _ in range(m + 1)] for i in range(iterations_count): for j in", "row[i] * model[i] return y + model[m] def compute_grad_mse(model, row, actual): m =", "float(n) return mse ** 0.5 def compute_r2(prediction, actual): nominator = 0.0 denominator =", "for d in data: expectation += d / float(n) sd = 0.0 for", "2) / float(n) return mse ** 0.5 def compute_r2(prediction, actual): nominator = 0.0", "1)] for i in range(iterations_count): for j in range(len(fold)): row = fold[j] actual", "in range(len(dataset[0]))] stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] +", "for row in fold] test_pred = [compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred,", "+= 2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m =", "for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr] values = [\"X\"] +", "] + \\ [ [stats[j][i] for j in range(len(stats))] for i in range(2)", "range(m + 1)] diff = compute_y(model, row) - actual for i in range(m):", "/ (1 + i) * grad[k] for k in range(m + 1)] return", "m = len(row) y = 0.0 for i in range(m): y += row[i]", "_ in range(m + 1)] diff = compute_y(model, row) - actual for i", "i in range(n): mse += ((prediction[i] - actual[i]) ** 2) / float(n) return", "[stats[j][i] for j in range(len(stats))] for i in range(2) ] table = 
graph_objects.Table(header=dict(values=values),", "y += row[i] * model[i] return y + model[m] def compute_grad_mse(model, row, actual):", "= sum(fold, []) test = set fold_actual = [r[-1] for r in fold]", "for i in range(n): mse += ((prediction[i] - actual[i]) ** 2) / float(n)", "[compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred,", "(train)\", \"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\" + str(i) for i", "sets = cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations)", "in fold] test_pred = [compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred,", "for j in range(len(stats))] for i in range(2) ] table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells))", "row = fold[j] actual = actuals[j] grad = compute_grad_mse(model, row, actual) model =", "= 0.0 n = len(prediction) for i in range(n): mse += ((prediction[i] -", "= normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets,", "m = len(fold[0]) model = [0.0 for _ in range(m + 1)] for", "in range(n): mse += ((prediction[i] - actual[i]) ** 2) / float(n) return mse", "sum(fold, []) test = set fold_actual = [r[-1] for r in fold] fold", "+= ((prediction[i] - actual[i]) ** 2) / float(n) return mse ** 0.5 def", "return model def compute_rmse(prediction, actual): mse = 0.0 n = len(prediction) for i", "= 0.0 denominator = 0.0 expect = 0.0 for i in range(len(actual)): nominator", "in reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist()", 
"iterations_count) fold_pred = [compute_y(model, row) for row in fold] test_pred = [compute_y(model, row)", "def compute_y(model, row): m = len(row) y = 0.0 for i in range(m):", "grad = compute_grad_mse(model, row, actual) model = [model[k] - learning_rate / (1 +", "cross_validation_split(dataset, cv_k) models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr =", "scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size =", "def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k):", "r in test] test = [r[0:-1] for r in test] model = learn_stochastic_gradient_decent_mse(fold,", "= learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row) for row in fold]", "0.0 for d in data: expectation += d / float(n) sd = 0.0", "model[i] return y + model[m] def compute_grad_mse(model, row, actual): m = len(row) grad", "len(data) expectation = 0.0 for d in data: expectation += d / float(n)", "grad[i] += 2.0 * row[i] * diff grad[m] += 2.0 * diff return", "[rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ] + \\", "def compute_rmse(prediction, actual): mse = 0.0 n = len(prediction) for i in range(n):", "preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits", "[\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\" + str(i) for", "[] r2s_test = [] for set in sets: fold = list(sets) fold.remove(set) fold", "in range(m): grad[i] += 2.0 * row[i] * diff grad[m] += 2.0 *", "data: expectation 
+= d / float(n) sd = 0.0 for d in data:", "return 1 - float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count): models = []", "return y + model[m] def compute_grad_mse(model, row, actual): m = len(row) grad =", "diff grad[m] += 2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count):", "= compute_grad_mse(model, row, actual) model = [model[k] - learning_rate / (1 + i)", "models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for", "+ models_tr] values = [\"X\"] + [\"Fold\" + str(i) for i in range(cv_k)]", "0.5 def compute_r2(prediction, actual): nominator = 0.0 denominator = 0.0 expect = 0.0", "[ [stats[j][i] for j in range(len(stats))] for i in range(2) ] table =", "diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m = len(fold[0]) model =", "in r] for r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized", "= [] rmses = [] r2s = [] rmses_test = [] r2s_test =", "in range(m + 1)] diff = compute_y(model, row) - actual for i in", "def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m = len(fold[0]) model = [0.0 for _", "expectation) ** 2) / float(n) return expectation, sd ** 0.5 filepath = \"features_var_1.csv\"", "fold_pred = [compute_y(model, row) for row in fold] test_pred = [compute_y(model, row) for", "learning_rate / (1 + i) * grad[k] for k in range(m + 1)]", "d in data: sd += ((d - expectation) ** 2) / float(n) return", "row): m = len(row) y = 0.0 for i in range(m): y +=", "+= (actual[i] - prediction[i]) ** 2 for i in range(len(actual)): expect += actual[i]", "= compute_y(model, row) - actual for i in range(m): grad[i] += 2.0 *", "nominator = 0.0 denominator = 0.0 expect = 0.0 for i in range(len(actual)):", "def compute_grad_mse(model, 
row, actual): m = len(row) grad = [0.0 for _ in", "for r in test] test = [r[0:-1] for r in test] model =", "range(cv_k)] for j in range(len(dataset[0]))] stats = [compute_stat(data) for data in [rmse_train, r2_train,", "0.0 for i in range(len(actual)): nominator += (actual[i] - prediction[i]) ** 2 for", "offset + s)]) offset += s return sets def compute_y(model, row): m =", "float(len(actual)) for i in range(len(actual)): denominator += (actual[i] - expect) ** 2 return", "+ [\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"]", "diff = compute_y(model, row) - actual for i in range(m): grad[i] += 2.0", "in data: expectation += d / float(n) sd = 0.0 for d in", "rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ] + \\ [ [stats[j][i]", "for d in data: sd += ((d - expectation) ** 2) / float(n)", "models_tr] values = [\"X\"] + [\"Fold\" + str(i) for i in range(cv_k)] +", "+= s return sets def compute_y(model, row): m = len(row) y = 0.0", "2.0 * row[i] * diff grad[m] += 2.0 * diff return grad def", "models[i] for i in range(cv_k) ] + \\ [ [stats[j][i] for j in", "= 5 learn_rate = 0.01 iterations = 50 dataset = normalize_data(import_data(filepath)) sets =", "- actual[i]) ** 2) / float(n) return mse ** 0.5 def compute_r2(prediction, actual):", "\"RMSE (test)\", \"R2 (test)\"] + [\"f\" + str(i) for i in range(len(dataset[0]))] ]", "i in range(m): y += row[i] * model[i] return y + model[m] def", "= 0.01 iterations = 50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) models,", "fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s, rmses_test, r2s_test def", "test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) 
models.append(model) return models, rmses,", "actual): nominator = 0.0 denominator = 0.0 expect = 0.0 for i in", "plotly import graph_objects def import_data(path): return [[float(f) for f in r] for r", "= run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for i in range(cv_k)] for j", "range(len(stats))] for i in range(2) ] table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells)) figure = graph_objects.Figure(data=[table])", "return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits = [int(size / cross_validation_k)", "fold] test_pred = [compute_y(model, row) for row in test] rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual))", "scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits = [int(size /", "+ 1)] for i in range(iterations_count): for j in range(len(fold)): row = fold[j]", "2) / float(n) return expectation, sd ** 0.5 filepath = \"features_var_1.csv\" cv_k =", "= len(prediction) for i in range(n): mse += ((prediction[i] - actual[i]) ** 2)", "[compute_y(model, row) for row in fold] test_pred = [compute_y(model, row) for row in", "float(denominator) def run_learning(sets, learning_rate, iterations_count): models = [] rmses = [] r2s =", "i in range(len(actual)): expect += actual[i] / float(len(actual)) for i in range(len(actual)): denominator", "in range(len(actual)): nominator += (actual[i] - prediction[i]) ** 2 for i in range(len(actual)):", "2 for i in range(len(actual)): expect += actual[i] / float(len(actual)) for i in", "2 return 1 - float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count): models =", "[] rmses_test = [] r2s_test = [] for set in sets: fold =", "\"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = 
scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset,", "(test)\"] + [\"f\" + str(i) for i in range(len(dataset[0]))] ] + \\ [", "def compute_stat(data): n = len(data) expectation = 0.0 for d in data: expectation", "((d - expectation) ** 2) / float(n) return expectation, sd ** 0.5 filepath", "size - sum(splits[0:cross_validation_k-1]) sets = list() offset = 0 for s in splits:", "r] for r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized =", "in sets: fold = list(sets) fold.remove(set) fold = sum(fold, []) test = set", "compute_stat(data): n = len(data) expectation = 0.0 for d in data: expectation +=", "r2_train, rmse_test, r2_test] + models_tr] values = [\"X\"] + [\"Fold\" + str(i) for", "+ model[m] def compute_grad_mse(model, row, actual): m = len(row) grad = [0.0 for", "= [r[-1] for r in test] test = [r[0:-1] for r in test]", "\\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ]", "learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row) for row in fold] test_pred", "actual): m = len(row) grad = [0.0 for _ in range(m + 1)]", "grad = [0.0 for _ in range(m + 1)] diff = compute_y(model, row)", "- learning_rate / (1 + i) * grad[k] for k in range(m +", "float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count): models = [] rmses = []", "rmses.append(compute_rmse(fold_pred, fold_actual)) r2s.append(compute_r2(fold_pred, fold_actual)) rmses_test.append(compute_rmse(test_pred, test_actual)) r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s,", "0.0 for d in data: sd += ((d - expectation) ** 2) /", "1 - float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count): models = [] rmses", "_ in range(m + 1)] for i in range(iterations_count): 
for j in range(len(fold)):", "row) for row in fold] test_pred = [compute_y(model, row) for row in test]", "= [r[0:-1] for r in fold] test_actual = [r[-1] for r in test]", "for r in fold] test_actual = [r[-1] for r in test] test =", "rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j] for i", "= 0.0 for i in range(m): y += row[i] * model[i] return y", "= 0.0 expect = 0.0 for i in range(len(actual)): nominator += (actual[i] -", "r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return", "reader from sklearn import preprocessing from plotly import graph_objects def import_data(path): return [[float(f)", "= [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\" +", "from sklearn import preprocessing from plotly import graph_objects def import_data(path): return [[float(f) for", "models_tr = [[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))] stats =", "i in range(m): grad[i] += 2.0 * row[i] * diff grad[m] += 2.0", "len(fold[0]) model = [0.0 for _ in range(m + 1)] for i in", "test = set fold_actual = [r[-1] for r in fold] fold = [r[0:-1]", "1] = size - sum(splits[0:cross_validation_k-1]) sets = list() offset = 0 for s", "(test)\", \"R2 (test)\"] + [\"f\" + str(i) for i in range(len(dataset[0]))] ] +", "0.0 expect = 0.0 for i in range(len(actual)): nominator += (actual[i] - prediction[i])", "models = [] rmses = [] r2s = [] rmses_test = [] r2s_test", "normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size = len(dataset) splits = [int(size / cross_validation_k) for", "i in range(len(actual)): denominator += (actual[i] - expect) ** 2 return 1 -", "for i in range(len(actual)): denominator += (actual[i] - expect) ** 2 return 1", "+= (actual[i] - expect) ** 2 return 1 - float(nominator) / float(denominator) def", "[\"Fold\" + str(i) for i in 
range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE", "i in range(offset, offset + s)]) offset += s return sets def compute_y(model,", "for i in range(m): grad[i] += 2.0 * row[i] * diff grad[m] +=", "r2s = [] rmses_test = [] r2s_test = [] for set in sets:", "cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1]) sets", "- 1] = size - sum(splits[0:cross_validation_k-1]) sets = list() offset = 0 for", "cv_k = 5 learn_rate = 0.01 iterations = 50 dataset = normalize_data(import_data(filepath)) sets", "[ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\" + str(i)", "(train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\" + str(i) for i in range(len(dataset[0]))]", "iterations_count): models = [] rmses = [] r2s = [] rmses_test = []", "set fold_actual = [r[-1] for r in fold] fold = [r[0:-1] for r", "= [\"X\"] + [\"Fold\" + str(i) for i in range(cv_k)] + [\"E\",\"SD\"] cells", "cross_validation_k): size = len(dataset) splits = [int(size / cross_validation_k) for _ in range(cross_validation_k)]", "= 0 for s in splits: sets.append([dataset[i] for i in range(offset, offset +", "compute_y(model, row): m = len(row) y = 0.0 for i in range(m): y", "\"features_var_1.csv\" cv_k = 5 learn_rate = 0.01 iterations = 50 dataset = normalize_data(import_data(filepath))", "s in splits: sets.append([dataset[i] for i in range(offset, offset + s)]) offset +=", "from csv import reader from sklearn import preprocessing from plotly import graph_objects def", "rmses = [] r2s = [] rmses_test = [] r2s_test = [] for", "/ float(denominator) def run_learning(sets, learning_rate, iterations_count): models = [] rmses = [] r2s", "fold = sum(fold, []) test = set fold_actual = [r[-1] for r in", "splits = [int(size / cross_validation_k) for _ in range(cross_validation_k)] splits[cross_validation_k - 1] =", "= set fold_actual = [r[-1] for r in fold] fold = [r[0:-1] for", "r2_test] + models_tr] values = [\"X\"] + 
[\"Fold\" + str(i) for i in", "for i in range(2) ] table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells)) figure = graph_objects.Figure(data=[table]) figure.show()", "sum(splits[0:cross_validation_k-1]) sets = list() offset = 0 for s in splits: sets.append([dataset[i] for", "[r[-1] for r in test] test = [r[0:-1] for r in test] model", "r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row)", "range(m + 1)] for i in range(iterations_count): for j in range(len(fold)): row =", "y + model[m] def compute_grad_mse(model, row, actual): m = len(row) grad = [0.0", "normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist() def cross_validation_split(dataset, cross_validation_k): size", "in test] test = [r[0:-1] for r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual,", "n = len(data) expectation = 0.0 for d in data: expectation += d", "in range(cv_k)] for j in range(len(dataset[0]))] stats = [compute_stat(data) for data in [rmse_train,", "+ i) * grad[k] for k in range(m + 1)] return model def", "* diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m = len(fold[0]) model", "[] r2s = [] rmses_test = [] r2s_test = [] for set in", "nominator += (actual[i] - prediction[i]) ** 2 for i in range(len(actual)): expect +=", "** 2) / float(n) return expectation, sd ** 0.5 filepath = \"features_var_1.csv\" cv_k", "range(len(dataset[0]))] ] + \\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i", "= 0.0 for i in range(len(actual)): nominator += (actual[i] - prediction[i]) ** 2", "** 0.5 def compute_r2(prediction, actual): nominator = 0.0 denominator = 0.0 expect =", "set in sets: fold = list(sets) fold.remove(set) fold = sum(fold, []) test =", "expectation += d / float(n) sd = 0.0 
for d in data: sd", "1)] return model def compute_rmse(prediction, actual): mse = 0.0 n = len(prediction) for", "actual = actuals[j] grad = compute_grad_mse(model, row, actual) model = [model[k] - learning_rate", "in fold] test_actual = [r[-1] for r in test] test = [r[0:-1] for", "i) * grad[k] for k in range(m + 1)] return model def compute_rmse(prediction,", "in range(m + 1)] for i in range(iterations_count): for j in range(len(fold)): row", "r2s_test.append(compute_r2(test_pred, test_actual)) models.append(model) return models, rmses, r2s, rmses_test, r2s_test def compute_stat(data): n =", "range(offset, offset + s)]) offset += s return sets def compute_y(model, row): m", "learning_rate, iterations_count): models = [] rmses = [] r2s = [] rmses_test =", "in range(iterations_count): for j in range(len(fold)): row = fold[j] actual = actuals[j] grad", "filepath = \"features_var_1.csv\" cv_k = 5 learn_rate = 0.01 iterations = 50 dataset", "from plotly import graph_objects def import_data(path): return [[float(f) for f in r] for", "[rmse_train, r2_train, rmse_test, r2_test] + models_tr] values = [\"X\"] + [\"Fold\" + str(i)", "sets = list() offset = 0 for s in splits: sets.append([dataset[i] for i", "return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m = len(fold[0]) model = [0.0", "= [[models[i][j] for i in range(cv_k)] for j in range(len(dataset[0]))] stats = [compute_stat(data)", "in fold] fold = [r[0:-1] for r in fold] test_actual = [r[-1] for", "- expect) ** 2 return 1 - float(nominator) / float(denominator) def run_learning(sets, learning_rate,", "def run_learning(sets, learning_rate, iterations_count): models = [] rmses = [] r2s = []", "[model[k] - learning_rate / (1 + i) * grad[k] for k in range(m", "prediction[i]) ** 2 for i in range(len(actual)): expect += actual[i] / float(len(actual)) for", "in [rmse_train, r2_train, rmse_test, r2_test] + models_tr] values = [\"X\"] + [\"Fold\" +", "model = 
learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model, row) for row in", "= len(data) expectation = 0.0 for d in data: expectation += d /", "reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1)) normalized = scaler.fit_transform(dataset) return normalized.tolist() def", "[compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test] + models_tr] values = [\"X\"]", "expect = 0.0 for i in range(len(actual)): nominator += (actual[i] - prediction[i]) **", "grad[m] += 2.0 * diff return grad def learn_stochastic_gradient_decent_mse(fold, actuals, learning_rate, iterations_count): m", "= len(row) y = 0.0 for i in range(m): y += row[i] *", "s return sets def compute_y(model, row): m = len(row) y = 0.0 for", "in range(offset, offset + s)]) offset += s return sets def compute_y(model, row):", "in range(len(stats))] for i in range(2) ] table = graph_objects.Table(header=dict(values=values), cells=dict(values=cells)) figure =", "denominator += (actual[i] - expect) ** 2 return 1 - float(nominator) / float(denominator)", "[\"X\"] + [\"Fold\" + str(i) for i in range(cv_k)] + [\"E\",\"SD\"] cells =", "= [] for set in sets: fold = list(sets) fold.remove(set) fold = sum(fold,", "1)] diff = compute_y(model, row) - actual for i in range(m): grad[i] +=", "- prediction[i]) ** 2 for i in range(len(actual)): expect += actual[i] / float(len(actual))", "iterations_count): m = len(fold[0]) model = [0.0 for _ in range(m + 1)]", "+ 1)] return model def compute_rmse(prediction, actual): mse = 0.0 n = len(prediction)", "for i in range(cv_k) ] + \\ [ [stats[j][i] for j in range(len(stats))]", "** 2 return 1 - float(nominator) / float(denominator) def run_learning(sets, learning_rate, iterations_count): models", "+= actual[i] / float(len(actual)) for i in range(len(actual)): denominator += (actual[i] - expect)", "i in range(len(dataset[0]))] ] + \\ [ 
[rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]] + models[i]", "fold = [r[0:-1] for r in fold] test_actual = [r[-1] for r in", "rmses_test = [] r2s_test = [] for set in sets: fold = list(sets)", "5 learn_rate = 0.01 iterations = 50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset,", "import graph_objects def import_data(path): return [[float(f) for f in r] for r in", "[r[0:-1] for r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred =", "+ [\"f\" + str(i) for i in range(len(dataset[0]))] ] + \\ [ [rmse_train[i],", "/ float(len(actual)) for i in range(len(actual)): denominator += (actual[i] - expect) ** 2", "d in data: expectation += d / float(n) sd = 0.0 for d", "in range(cv_k) ] + \\ [ [stats[j][i] for j in range(len(stats))] for i", "range(len(actual)): denominator += (actual[i] - expect) ** 2 return 1 - float(nominator) /", "cells = [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\", \"R2 (test)\"] + [\"f\"", "range(n): mse += ((prediction[i] - actual[i]) ** 2) / float(n) return mse **", "r2s_test def compute_stat(data): n = len(data) expectation = 0.0 for d in data:", "range(len(actual)): nominator += (actual[i] - prediction[i]) ** 2 for i in range(len(actual)): expect", "range(iterations_count): for j in range(len(fold)): row = fold[j] actual = actuals[j] grad =", "row, actual): m = len(row) grad = [0.0 for _ in range(m +", "= list() offset = 0 for s in splits: sets.append([dataset[i] for i in", "learning_rate, iterations_count): m = len(fold[0]) model = [0.0 for _ in range(m +", "return mse ** 0.5 def compute_r2(prediction, actual): nominator = 0.0 denominator = 0.0", "str(i) for i in range(len(dataset[0]))] ] + \\ [ [rmse_train[i], r2_train[i], rmse_test[i], r2_test[i]]", "r2_train[i], rmse_test[i], r2_test[i]] + models[i] for i in range(cv_k) ] + \\ [", "50 dataset = normalize_data(import_data(filepath)) sets = cross_validation_split(dataset, cv_k) 
models, rmse_train, r2_train, rmse_test, r2_test", "for r in test] model = learn_stochastic_gradient_decent_mse(fold, fold_actual, learning_rate, iterations_count) fold_pred = [compute_y(model,", "= 0.0 for d in data: expectation += d / float(n) sd =", "+ [\"Fold\" + str(i) for i in range(cv_k)] + [\"E\",\"SD\"] cells = [", "float(n) sd = 0.0 for d in data: sd += ((d - expectation)", "return [[float(f) for f in r] for r in reader(open(path, \"r\"))] def normalize_data(dataset):", "in range(len(actual)): denominator += (actual[i] - expect) ** 2 return 1 - float(nominator)", "= fold[j] actual = actuals[j] grad = compute_grad_mse(model, row, actual) model = [model[k]", "def import_data(path): return [[float(f) for f in r] for r in reader(open(path, \"r\"))]", "f in r] for r in reader(open(path, \"r\"))] def normalize_data(dataset): scaler = preprocessing.MinMaxScaler(feature_range=(0,1))", "in range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2 (train)\", \"RMSE (test)\",", "for k in range(m + 1)] return model def compute_rmse(prediction, actual): mse =", "compute_y(model, row) - actual for i in range(m): grad[i] += 2.0 * row[i]", "((prediction[i] - actual[i]) ** 2) / float(n) return mse ** 0.5 def compute_r2(prediction,", "cv_k) models, rmse_train, r2_train, rmse_test, r2_test = run_learning(sets, learn_rate, iterations) models_tr = [[models[i][j]", "range(cross_validation_k)] splits[cross_validation_k - 1] = size - sum(splits[0:cross_validation_k-1]) sets = list() offset =", "for i in range(cv_k)] + [\"E\",\"SD\"] cells = [ [\"RMSE (train)\", \"R2 (train)\",", "i in range(len(actual)): nominator += (actual[i] - prediction[i]) ** 2 for i in", "fold] test_actual = [r[-1] for r in test] test = [r[0:-1] for r", "j in range(len(dataset[0]))] stats = [compute_stat(data) for data in [rmse_train, r2_train, rmse_test, r2_test]", "float(n) return expectation, sd ** 0.5 filepath = \"features_var_1.csv\" cv_k = 5 learn_rate", "fold = list(sets) fold.remove(set) fold 
= sum(fold, []) test = set fold_actual =" ]
[ "b1 = 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21))", "summation is more precise because the error of the number is proportional to", "error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error is: \",error)", "temporary script file. \"\"\" import numpy as np import matplotlib.pyplot as plt N", "np import matplotlib.pyplot as plt N = int(input(\"Pick a number: \")) n, a,", "b = 1/m sum2+=b m = m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For", "import numpy as np import matplotlib.pyplot as plt N = int(input(\"Pick a number:", "1/n sum1+=a n = n+1 while (m>=1): b = 1/m sum2+=b m =", "(m1>=1): b1 = 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1 = (sum11 -", "a1 = 1/n1 sum11+=a1 n1 = n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1", "to the number you are summing, therefore as the number decreases, there is", "coding: utf-8 -*- \"\"\" Spyder Editor This is a temporary script file. \"\"\"", "= 1, 0, 0, N, 0, 0 while (n<=N): a = 1/n sum1+=a", "error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. 
Error\")", "print(\"The down summation is more precise because the error of the number is", "is: \",error) N1=1 Ns=[] Errors=[] while N1<=N: n1, a1, sum11, m1, b1, sum21", "Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs.", "you are summing, therefore as the number decreases, there is less error, versus", "decreases, there is less error, versus starting with a small number, and adding", "N = :\",N,\", the error is: \",error) N1=1 Ns=[] Errors=[] while N1<=N: n1,", "b1, sum21 = 1, 0, 0, N1, 0, 0 while (n1<=N1): a1 =", "= m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error", "plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show() print(\"The down summation is more precise because the", "= m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\")", "are summing, therefore as the number decreases, there is less error, versus starting", "sum11+=a1 n1 = n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1 m1 = m1-1", "is less error, versus starting with a small number, and adding more error", "= 1/m sum2+=b m = m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N", "error, versus starting with a small number, and adding more error for every", "error of the number is proportional to the number you are summing, therefore", "sum11, m1, b1, sum21 = 1, 0, 0, N1, 0, 0 while (n1<=N1):", "more precise because the error of the number is proportional to the number", ":\",N,\", the error is: \",error) N1=1 Ns=[] Errors=[] while N1<=N: n1, a1, sum11,", "- sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error is: \",error) N1=1 Ns=[] Errors=[]", "0, 0 while (n<=N): a = 1/n sum1+=a n = n+1 while (m>=1):", 
"number decreases, there is less error, versus starting with a small number, and", "while N1<=N: n1, a1, sum11, m1, b1, sum21 = 1, 0, 0, N1,", "= int(input(\"Pick a number: \")) n, a, sum1, m, b, sum2 = 1,", "1, 0, 0, N, 0, 0 while (n<=N): a = 1/n sum1+=a n", "1, 0, 0, N1, 0, 0 while (n1<=N1): a1 = 1/n1 sum11+=a1 n1", "sum21+=b1 m1 = m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-')", "= :\",N,\", the error is: \",error) N1=1 Ns=[] Errors=[] while N1<=N: n1, a1,", "(sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show() print(\"The", "Errors=[] while N1<=N: n1, a1, sum11, m1, b1, sum21 = 1, 0, 0,", "= (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error is: \",error) N1=1", "= 1/n1 sum11+=a1 n1 = n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1 m1", "file. \"\"\" import numpy as np import matplotlib.pyplot as plt N = int(input(\"Pick", "is more precise because the error of the number is proportional to the", "-*- coding: utf-8 -*- \"\"\" Spyder Editor This is a temporary script file.", "sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error is: \",error) N1=1 Ns=[] Errors=[] while", "numpy as np import matplotlib.pyplot as plt N = int(input(\"Pick a number: \"))", "m = m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the", "plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. 
Error\") plt.show() print(\"The down summation is more precise because", "1/m sum2+=b m = m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N =", "(n1<=N1): a1 = 1/n1 sum11+=a1 n1 = n1+1 while (m1>=1): b1 = 1/m1", "0, N1, 0, 0 while (n1<=N1): a1 = 1/n1 sum11+=a1 n1 = n1+1", "N1=1 Ns=[] Errors=[] while N1<=N: n1, a1, sum11, m1, b1, sum21 = 1,", "therefore as the number decreases, there is less error, versus starting with a", "n = n+1 while (m>=1): b = 1/m sum2+=b m = m-1 error", "This is a temporary script file. \"\"\" import numpy as np import matplotlib.pyplot", "n+1 while (m>=1): b = 1/m sum2+=b m = m-1 error = (sum1", "= 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1)", "plt.show() print(\"The down summation is more precise because the error of the number", "N = int(input(\"Pick a number: \")) n, a, sum1, m, b, sum2 =", "(sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error is: \",error) N1=1 Ns=[]", "print(\"For N = :\",N,\", the error is: \",error) N1=1 Ns=[] Errors=[] while N1<=N:", "<reponame>bswood9321/PHYS-3210 # -*- coding: utf-8 -*- \"\"\" Spyder Editor This is a temporary", "= n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1", "a number: \")) n, a, sum1, m, b, sum2 = 1, 0, 0,", "of the number is proportional to the number you are summing, therefore as", "as the number decreases, there is less error, versus starting with a small", "Error\") plt.show() print(\"The down summation is more precise because the error of the", "- sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. 
Error\") plt.show() print(\"The down", "m1 = m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\")", "int(input(\"Pick a number: \")) n, a, sum1, m, b, sum2 = 1, 0,", "error is: \",error) N1=1 Ns=[] Errors=[] while N1<=N: n1, a1, sum11, m1, b1,", "# -*- coding: utf-8 -*- \"\"\" Spyder Editor This is a temporary script", "1/n1 sum11+=a1 n1 = n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1 m1 =", "Spyder Editor This is a temporary script file. \"\"\" import numpy as np", "0, N, 0, 0 while (n<=N): a = 1/n sum1+=a n = n+1", "as np import matplotlib.pyplot as plt N = int(input(\"Pick a number: \")) n,", "Editor This is a temporary script file. \"\"\" import numpy as np import", "number you are summing, therefore as the number decreases, there is less error,", "\",error) N1=1 Ns=[] Errors=[] while N1<=N: n1, a1, sum11, m1, b1, sum21 =", "-*- \"\"\" Spyder Editor This is a temporary script file. \"\"\" import numpy", "number is proportional to the number you are summing, therefore as the number", "while (n<=N): a = 1/n sum1+=a n = n+1 while (m>=1): b =", "= (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show()", "the number you are summing, therefore as the number decreases, there is less", "a, sum1, m, b, sum2 = 1, 0, 0, N, 0, 0 while", "utf-8 -*- \"\"\" Spyder Editor This is a temporary script file. \"\"\" import", "a temporary script file. 
\"\"\" import numpy as np import matplotlib.pyplot as plt", "N1<=N: n1, a1, sum11, m1, b1, sum21 = 1, 0, 0, N1, 0,", "while (n1<=N1): a1 = 1/n1 sum11+=a1 n1 = n1+1 while (m1>=1): b1 =", "(m>=1): b = 1/m sum2+=b m = m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2))", "while (m1>=1): b1 = 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1 = (sum11", "sum1, m, b, sum2 = 1, 0, 0, N, 0, 0 while (n<=N):", "the number decreases, there is less error, versus starting with a small number,", "is proportional to the number you are summing, therefore as the number decreases,", "0, 0 while (n1<=N1): a1 = 1/n1 sum11+=a1 n1 = n1+1 while (m1>=1):", "\"\"\" import numpy as np import matplotlib.pyplot as plt N = int(input(\"Pick a", "plt N = int(input(\"Pick a number: \")) n, a, sum1, m, b, sum2", "N, 0, 0 while (n<=N): a = 1/n sum1+=a n = n+1 while", "less error, versus starting with a small number, and adding more error for", "= 1, 0, 0, N1, 0, 0 while (n1<=N1): a1 = 1/n1 sum11+=a1", "N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show() print(\"The down summation is more", "proportional to the number you are summing, therefore as the number decreases, there", "sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show() print(\"The down summation", "there is less error, versus starting with a small number, and adding more", "0, 0, N, 0, 0 while (n<=N): a = 1/n sum1+=a n =", "import matplotlib.pyplot as plt N = int(input(\"Pick a number: \")) n, a, sum1,", "the number is proportional to the number you are summing, therefore as the", "because the error of the number is proportional to the number you are", "sum2+=b m = m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\",", "is a temporary script file. 
\"\"\" import numpy as np import matplotlib.pyplot as", "as plt N = int(input(\"Pick a number: \")) n, a, sum1, m, b,", "matplotlib.pyplot as plt N = int(input(\"Pick a number: \")) n, a, sum1, m,", "\"\"\" Spyder Editor This is a temporary script file. \"\"\" import numpy as", "Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show() print(\"The down summation is", "sum2 = 1, 0, 0, N, 0, 0 while (n<=N): a = 1/n", "plt.title(\"N vs. Error\") plt.show() print(\"The down summation is more precise because the error", "= 1/n sum1+=a n = n+1 while (m>=1): b = 1/m sum2+=b m", "N1, 0, 0 while (n1<=N1): a1 = 1/n1 sum11+=a1 n1 = n1+1 while", "0, 0, N1, 0, 0 while (n1<=N1): a1 = 1/n1 sum11+=a1 n1 =", "m1, b1, sum21 = 1, 0, 0, N1, 0, 0 while (n1<=N1): a1", "versus starting with a small number, and adding more error for every summation.\")", "summing, therefore as the number decreases, there is less error, versus starting with", "sum21 = 1, 0, 0, N1, 0, 0 while (n1<=N1): a1 = 1/n1", "(n<=N): a = 1/n sum1+=a n = n+1 while (m>=1): b = 1/m", "precise because the error of the number is proportional to the number you", "script file. \"\"\" import numpy as np import matplotlib.pyplot as plt N =", "the error is: \",error) N1=1 Ns=[] Errors=[] while N1<=N: n1, a1, sum11, m1,", "1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1", "Ns=[] Errors=[] while N1<=N: n1, a1, sum11, m1, b1, sum21 = 1, 0,", "n1, a1, sum11, m1, b1, sum21 = 1, 0, 0, N1, 0, 0", "vs. 
Error\") plt.show() print(\"The down summation is more precise because the error of", "n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1) error1 =", "m1-1 Ns.append(N1) error1 = (sum11 - sum21)/(np.absolute(sum11)+np.absolute(sum21)) Errors.append(error1) N1=N1+1 plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N", "down summation is more precise because the error of the number is proportional", "m-1 error = (sum1 - sum2)/(np.absolute(sum1)+np.absolute(sum2)) print(\"For N = :\",N,\", the error is:", "n1 = n1+1 while (m1>=1): b1 = 1/m1 sum21+=b1 m1 = m1-1 Ns.append(N1)", "plt.plot(Ns,Errors,'r-') plt.xlabel(\"N\") plt.ylabel(\"Error(x10^-16)\") plt.title(\"N vs. Error\") plt.show() print(\"The down summation is more precise", "\")) n, a, sum1, m, b, sum2 = 1, 0, 0, N, 0,", "= n+1 while (m>=1): b = 1/m sum2+=b m = m-1 error =", "number: \")) n, a, sum1, m, b, sum2 = 1, 0, 0, N,", "a1, sum11, m1, b1, sum21 = 1, 0, 0, N1, 0, 0 while", "0 while (n1<=N1): a1 = 1/n1 sum11+=a1 n1 = n1+1 while (m1>=1): b1", "the error of the number is proportional to the number you are summing,", "n, a, sum1, m, b, sum2 = 1, 0, 0, N, 0, 0", "0 while (n<=N): a = 1/n sum1+=a n = n+1 while (m>=1): b", "a = 1/n sum1+=a n = n+1 while (m>=1): b = 1/m sum2+=b", "while (m>=1): b = 1/m sum2+=b m = m-1 error = (sum1 -", "m, b, sum2 = 1, 0, 0, N, 0, 0 while (n<=N): a", "b, sum2 = 1, 0, 0, N, 0, 0 while (n<=N): a =", "sum1+=a n = n+1 while (m>=1): b = 1/m sum2+=b m = m-1" ]
[ "__init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base = args['freeze_base'] training_head_type = args['training_head_type'] self.device", "data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long()", "forward_test(self, data): model_output = self.forward_train(data) return model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data)", "= data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] =", "None: if training_head_type in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif", "= { 'scores': output_dict['scores'], 'target': target, } return model_output def forward_test(self, data): model_output", "image_mask.long() else: params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(),", "from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from", "else: params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda()", "self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), 
visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else", "return model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats') is not None", "''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) '''", "= kwargs['params'] freeze_base = args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available()", "data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent", "= data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output = { 'scores': output_dict['scores'], 'target':", "params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output = {'scores': output_dict,", "VQA_MODELS import torch import copy ''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder,", "data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output", "params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(),", "data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device)", "from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import", "**kwargs): super().__init__() args = kwargs['params'] freeze_base = args['freeze_base'] 
training_head_type = args['training_head_type'] self.device =", "'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path =", "params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] =", "self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is not None:", "data): params = copy.deepcopy(data) if params.get('feats') is not None and params.get('image_dim') is not", "self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data,", "image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) ==", "['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path)", "params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), }", "else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is not None: if", "load_lxmert_qa import json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self, **kwargs):", "(params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), 
params['det_feat_mask'].cuda()), } } model_output = {'scores': output_dict, 'target': target_dict}", "params.get('feats') is not None and params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if", "if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model =", "model_output = self.forward_train(data) return model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats')", "imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert import ClassificationModel", "# ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = data['ques']", "is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1)", "output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not", "} return model_output def forward_test(self, data): model_output = self.forward_train(data) return model_output def forward_train_pretrain(self,", "= data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output = {", "= copy.deepcopy(data) if params.get('feats') is not None and params.get('image_dim') is not None: image_mask", "training_head_type = args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type ==", "elif training_head_type == 'nlvr2': 
self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters(): p.requires_grad =", "= image_mask.long() else: params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(),", "imix.models.builder import VQA_MODELS import torch import copy ''' from transformers.modeling_bert import ( BertConfig,", "def forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes =", "= args['pretrained_path'] if pretrained_path is not None: if training_head_type in ['vqa2', 'gqa']: self.label2ans", "copy.deepcopy(data) if params.get('feats') is not None and params.get('image_dim') is not None: image_mask =", "import copy ''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel,", "target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(),", "= args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type == 'pretraining':", "image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None output_dict", "import LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json from", "if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask =", "in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, 
label2ans=self.label2ans) elif training_head_type == 'nlvr2':", "import load_lxmert_qa import json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self,", "model_output = { 'scores': output_dict['scores'], 'target': target, } return model_output def forward_test(self, data):", "= self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is not", "BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel from", "import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import", "= args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if", "output_dict['scores'], 'target': target, } return model_output def forward_test(self, data): model_output = self.forward_train(data) return", "visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else params['visual_attention_mask'], ) target_dict =", "import json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__()", "= json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for", "in self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device)", "def forward_test(self, data): model_output = self.forward_train(data) return model_output def forward_train_pretrain(self, data): params =", "is not None: if training_head_type in ['vqa2', 'gqa']: 
self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model,", "p.requires_grad = False def forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats =", "= LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if", "params = copy.deepcopy(data) if params.get('feats') is not None and params.get('image_dim') is not None:", "None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size())", "{ 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output", "== 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path", "pretrained_path is not None: if training_head_type in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path,", "params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output = {'scores': output_dict, 'target': target_dict} return", "freeze_base: for p in self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data, **kwargs): #", "args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type == 'pretraining': self.model", "boxes, sent) model_output = { 'scores': output_dict['scores'], 'target': target, } return model_output def", "self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type 
== 'pretraining': self.model =", ") ''' from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import", "forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats') is not None and params.get('image_dim') is", "False def forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes", "input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else params['visual_attention_mask'],", "''' from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa", "len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask']", "self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats", "'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output =", "image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None output_dict = self.model(", "None and params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()):", ".lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def 
__init__(self, **kwargs): super().__init__() args = kwargs['params']", "self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path']", "visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else params['visual_attention_mask'], ) target_dict = {", "if params.get('feats') is not None and params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1]))", "None else params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(),", "super().__init__() args = kwargs['params'] freeze_base = args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda'", "not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert", "# BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel", "args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type", "<gh_stars>0 from imix.models.builder import VQA_MODELS import torch import copy ''' from transformers.modeling_bert import", "self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters():", "boxes = data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats, boxes,", "'masked_lm_labels': 
params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(),", "params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if", "model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats') is not None and", "'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters(): p.requires_grad = False def forward_train(self,", "= self.forward_train(data) return model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats') is", "from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__() args =", "p in self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data, **kwargs): # ques_id =", "data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output = { 'scores': output_dict['scores'], 'target': target,", "self.forward_train(data) return model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats') is not", "self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is", "'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output = {'scores': output_dict, 'target':", "BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining 
from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table", "torch import copy ''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm,", "training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args)", "= { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()),", "BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model", "= self.model(feats, boxes, sent) model_output = { 'scores': output_dict['scores'], 'target': target, } return", "= data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats, boxes, sent)", "visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels':", "import torch import copy ''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, #", "= torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args)", "= False def forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device)", "output_dict = self.model(feats, boxes, sent) model_output = { 'scores': output_dict['scores'], 'target': target, }", ".lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def", ") target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 
'ans': params['ans'].cuda(), 'obj_labels': { 'obj':", "not None: if training_head_type in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans)", "= image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None output_dict =", "if training_head_type in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type", "forward_train(self, data, **kwargs): # ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device)", "'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } }", "class LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base = args['freeze_base'] training_head_type", "def __init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base = args['freeze_base'] training_head_type = args['training_head_type']", "== 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters(): p.requires_grad = False def", "len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask", "( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining", "training_head_type in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type ==", "params['lm_label_ids'].cuda(), 
'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()),", "self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base:", "is not None and params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size())", "LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path", "BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining from imix.models.vqa_models.base_model import", "params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(),", "= None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask']", "torch.cuda.is_available() else 'cpu') if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain", "'gqa']: self.label2ans = json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if", "not None else params['visual_attention_mask'], ) target_dict 
= { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans':", "if torch.cuda.is_available() else 'cpu') if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train =", "training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters(): p.requires_grad = False", "= (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size())", "{ 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr':", "target = data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output = { 'scores': output_dict['scores'],", "(torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask", "params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': {", "params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(),", "return model_output def forward_test(self, data): model_output = self.forward_train(data) return model_output def forward_train_pretrain(self, data):", "@VQA_MODELS.register_module() class LXMERT(BaseModel): def 
__init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base = args['freeze_base']", "sent) model_output = { 'scores': output_dict['scores'], 'target': target, } return model_output def forward_test(self,", "BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class", "else 'cpu') if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else:", "ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is not None: if training_head_type in ['vqa2',", "for p in self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data, **kwargs): # ques_id", "**kwargs): # ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent =", "'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output = {'scores': output_dict, 'target': target_dict} return model_output", "== len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] =", "< len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask <", "and params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if len(params['image_dim'].size()) < len(image_mask.size()): params['image_dim']", "params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask']", "not None and params.get('image_dim') is not None: image_mask = (torch.arange(params['feats'].size(-2)).expand(*params['feats'].size()[:-1])) if 
len(params['image_dim'].size()) <", "json.load(open(args.label2ans_path)) load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p", "import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base", "self.model = ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is not None: if training_head_type", "torch.device('cuda' if torch.cuda.is_available() else 'cpu') if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train", "'target': target, } return model_output def forward_test(self, data): model_output = self.forward_train(data) return model_output", "is not None else params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(),", "target, } return model_output def forward_test(self, data): model_output = self.forward_train(data) return model_output def", "assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else:", "= data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device) output_dict =", "data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats,", "(params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()), } } model_output = {'scores':", "= data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = 
data['ques'] target =", "import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert import ClassificationModel @VQA_MODELS.register_module()", "copy ''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, )", "label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in self.model.bert.parameters(): p.requires_grad", "< params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None output_dict = self.model( input_ids=params['input_ids'].cuda(),", "ques_id = data['ques_id'].to(self.device) feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = data['ques'] target", "len(image_mask.size()) image_mask = image_mask < params['image_dim'] params['visual_attention_mask'] = image_mask.long() else: params['visual_attention_mask'] = None", "'cpu') if training_head_type == 'pretraining': self.model = LXMERTForPretraining(**args) self.forward_train = self.forward_train_pretrain else: self.model", "None output_dict = self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is", "len(image_mask.size()): params['image_dim'] = data['image_dim'].unsqueeze(-1) assert len(params['image_dim'].size()) == len(image_mask.size()) image_mask = image_mask < params['image_dim']", "LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base = args['freeze_base'] training_head_type =", "sent = data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output =", "if params['visual_attention_mask'] is not None 
else params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(),", "self.model(feats, boxes, sent) model_output = { 'scores': output_dict['scores'], 'target': target, } return model_output", "'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat':", "kwargs['params'] freeze_base = args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else", "else params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label': params['is_matched'].cuda(), 'ans': params['ans'].cuda(), 'obj_labels':", "LXMERTForPretraining from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert", "args = kwargs['params'] freeze_base = args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda' if", "args['pretrained_path'] if pretrained_path is not None: if training_head_type in ['vqa2', 'gqa']: self.label2ans =", "'ans': params['ans'].cuda(), 'obj_labels': { 'obj': (params['det_obj_labels'].cuda(), params['det_obj_confs'].cuda()), 'attr': (params['det_attr_labels'].cuda(), params['det_attr_confs'].cuda()), 'feat': (params['det_feat'].cuda(), params['det_feat_mask'].cuda()),", "def forward_train_pretrain(self, data): params = copy.deepcopy(data) if params.get('feats') is not None and params.get('image_dim')", "load_lxmert_qa(pretrained_path, self.model, label2ans=self.label2ans) elif training_head_type == 'nlvr2': self.model.lxrt_encoder.load(pretrained_path) if freeze_base: for p in", "token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), 
visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else params['visual_attention_mask'], )", "if pretrained_path is not None: if training_head_type in ['vqa2', 'gqa']: self.label2ans = json.load(open(args.label2ans_path))", "'scores': output_dict['scores'], 'target': target, } return model_output def forward_test(self, data): model_output = self.forward_train(data)", "attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None else params['visual_attention_mask'], ) target_dict", "data['ques'] target = data['target'].to(self.device) output_dict = self.model(feats, boxes, sent) model_output = { 'scores':", "= self.model( input_ids=params['input_ids'].cuda(), token_type_ids=params['segment_ids'].cuda(), attention_mask=params['input_mask'].cuda(), visual_feats=params['feats'].cuda(), visual_pos=params['pos'].cuda(), visual_attention_mask=params['visual_attention_mask'].cuda() if params['visual_attention_mask'] is not None", "json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__() args", "feats = data['feats'].to(self.device) boxes = data['boxes'].to(self.device) sent = data['ques'] target = data['target'].to(self.device) output_dict", "from .lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert import ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel):", "{ 'scores': output_dict['scores'], 'target': target, } return model_output def forward_test(self, data): model_output =", "params['visual_attention_mask'] is not None else params['visual_attention_mask'], ) target_dict = { 'masked_lm_labels': params['lm_label_ids'].cuda(), 'matched_label':", ".lxmert import LXMERTForPretraining from 
imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json", "from imix.models.vqa_models.base_model import BaseModel from .lxmert_qa_answer_table import load_lxmert_qa import json from .lxmert import", "= ClassificationModel(**args) pretrained_path = args['pretrained_path'] if pretrained_path is not None: if training_head_type in", "model_output def forward_test(self, data): model_output = self.forward_train(data) return model_output def forward_train_pretrain(self, data): params", "freeze_base = args['freeze_base'] training_head_type = args['training_head_type'] self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "from imix.models.builder import VQA_MODELS import torch import copy ''' from transformers.modeling_bert import (", "transformers.modeling_bert import ( BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert", "pretrained_path = args['pretrained_path'] if pretrained_path is not None: if training_head_type in ['vqa2', 'gqa']:", "ClassificationModel @VQA_MODELS.register_module() class LXMERT(BaseModel): def __init__(self, **kwargs): super().__init__() args = kwargs['params'] freeze_base =", "data): model_output = self.forward_train(data) return model_output def forward_train_pretrain(self, data): params = copy.deepcopy(data) if", "import VQA_MODELS import torch import copy ''' from transformers.modeling_bert import ( BertConfig, BertEmbeddings,", "BertConfig, BertEmbeddings, BertEncoder, # BertLayerNorm, BertPreTrainedModel, ) ''' from .lxmert import LXMERTForPretraining from", "if freeze_base: for p in self.model.bert.parameters(): p.requires_grad = False def forward_train(self, data, **kwargs):" ]
[ "as EC from selenium.common.exceptions import NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless')", "selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException import sys,os options =", "webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit()", "sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password =", "import NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser =", "= browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click()", "import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import", "expected_conditions as EC from selenium.common.exceptions import NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors')", "except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') 
password = browser.find_element_by_xpath('//*[@id=\"password\"]')", "= webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>'", "password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until(", "sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try:", "browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码", "= browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH, '//*[@id=\"logout\"]')) )", "selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC", "import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\")", "输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn 
=", "import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException import sys,os options = webdriver.ChromeOptions()", "options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]')", "browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try:", "from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from", "# 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn", "By from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException import sys,os", "from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import", "browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password", "username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]')", 
"NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear()", "password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: #", "options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass", "password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH,", "browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx'", "browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element", "from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException import sys,os options", "NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options)", 
"browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH, '//*[@id=\"logout\"]')) ) print(\"网络已连接!\")", "try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH, '//*[@id=\"logout\"]')) ) print(\"网络已连接!\") finally: browser.quit()", "options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException:", "selenium.common.exceptions import NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser", "browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username =", "password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_)", "login_btn.click() try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH, '//*[@id=\"logout\"]')) ) print(\"网络已连接!\") finally:", "webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' #", "#!/bin/python3 from selenium import webdriver from selenium.webdriver.support.ui import 
WebDriverWait from selenium.webdriver.common.by import By", "from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as", "= webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!')", "selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException", "# 页面一直循环,直到显示连接成功 element = WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH, '//*[@id=\"logout\"]')) ) print(\"网络已连接!\") finally: browser.quit() browser.quit()", "options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except", "import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from", "from selenium.common.exceptions import NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu')", "WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions", "import By from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import NoSuchElementException import", "EC from selenium.common.exceptions import 
NoSuchElementException import sys,os options = webdriver.ChromeOptions() options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox')", "username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element = WebDriverWait(browser,", "print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]')", "try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit() except NoSuchElementException: pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username", "= browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功", "selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support", "options.add_argument('--ignore-certificate-errors') options.add_argument('--headless') options.add_argument('--no-sandbox') options.add_argument('--disable-gpu') browser = webdriver.Chrome(options=options) browser.get(\"http://172.16.31.10/\") try: browser.find_element_by_xpath('//*[@id=\"logout\"]') print('网络已连接!') browser.quit() sys.exit()", "pass username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_)", "login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element = 
WebDriverWait(browser, 10).until( EC.presence_of_element_located((By.XPATH, '//*[@id=\"logout\"]'))", "username.clear() username.send_keys(username_) password.clear() password.send_keys(password_) login_btn = browser.find_element_by_xpath('//*[@id=\"login-account\"]') login_btn.click() try: # 页面一直循环,直到显示连接成功 element =", "webdriver from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions", "username_='2020xxxxxxxxxxx' password_='<PASSWORD>' # 输入用户名,密码 username = browser.find_element_by_xpath('//*[@id=\"username\"]') password = browser.find_element_by_xpath('//*[@id=\"password\"]') username.clear() username.send_keys(username_) password.clear()" ]
[ "migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='transfer',", "2.2.4 on 2019-09-11 14:12 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic',", "[ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic',", "name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='transfer', field=models.ImageField(blank=True, upload_to=''),", "[ ('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField(", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations =", "), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='transfer', field=models.ImageField(blank=True, upload_to=''), ), ]", "Generated by Django 2.2.4 on 2019-09-11 14:12 from django.db import migrations, models class", "2019-09-11 14:12 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users',", "operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', 
name='classification18', field=models.TextField(blank=True), ),", "] operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations", "models class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField(", "field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='transfer', field=models.ImageField(blank=True, upload_to=''), ),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations = [", "dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True),", "model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='transfer', field=models.ImageField(blank=True,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ]", "'0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18',", "Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic', name='classification152',", "on 2019-09-11 14:12 from django.db import 
migrations, models class Migration(migrations.Migration): dependencies = [", "14:12 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0004_auto_20190907_1334'),", "= [ ('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ),", "by Django 2.2.4 on 2019-09-11 14:12 from django.db import migrations, models class Migration(migrations.Migration):", "= [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic', name='classification18', field=models.TextField(blank=True), ), migrations.AddField(", "# Generated by Django 2.2.4 on 2019-09-11 14:12 from django.db import migrations, models", "('users', '0004_auto_20190907_1334'), ] operations = [ migrations.AddField( model_name='pic', name='classification152', field=models.TextField(blank=True), ), migrations.AddField( model_name='pic',", "Django 2.2.4 on 2019-09-11 14:12 from django.db import migrations, models class Migration(migrations.Migration): dependencies" ]
[ "{} Q = prio_dict.PriorityDictionary() Q[src] = 0 for v in Q: D[v] =", "path = [] while True: path.append(dst) if dst == src: break dst =", "or len(temp) < len(shortest_path): shortest_path = temp return shortest_path def is_connected(self, visited=None, src=None):", "src, dst): D,P = self.Dijkstra(G, src, dst) path = [] while True: path.append(dst)", "def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] = {} def get_vertex(self, node):", "path is None: path = [] graph = self.vertices path = path +", "graph[src]: if v not in path: temp = self.find_path(v, dst, path) if temp:", "len(visited) != len(vertices): for v in graph[src]: if v not in visited: if", "D[w]: raise ValueError elif w not in Q or vw_length < Q[w]: Q[w]", "for v in graph[src]: if v not in visited: if self.is_connected(visited, v): return", "[] paths = [] for v in graph[src]: if v not in path:", "or vw_length < Q[w]: Q[w] = vw_length P[w] = v return (D,P) def", "in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] = {} def", "if src is None: src = vertices[0] visited.add(src) if len(visited) != len(vertices): for", "Q[w]: Q[w] = vw_length P[w] = v return (D,P) def shortest_path(self, G, src,", "D: if vw_length < D[w]: raise ValueError elif w not in Q or", "return [] paths = [] for v in graph[src]: if v not in", "G, src, dst=None): D = {} P = {} Q = prio_dict.PriorityDictionary() Q[src]", "P = {} Q = prio_dict.PriorityDictionary() Q[src] = 0 for v in Q:", "= [] for v in graph[src]: if v not in path: temp =", "self.find_all_paths(v, dst, path) for p in temp: paths.append(p) return paths def find_shortest_path(self, src,", "= set() graph = self.vertices vertices = list(graph.keys()) if src is None: src", "return True return False def Dijkstra(self, G, src, dst=None): D = {} P", "(D,P) def shortest_path(self, G, src, dst): D,P = self.Dijkstra(G, src, dst) path =", 
"self.find_path(v, dst, path) if temp: return temp return None def find_all_paths(self, src, dst,", "= vertices[0] visited.add(src) if len(visited) != len(vertices): for v in graph[src]: if v", "def add_vertex(self, node): self.vertices[node] = {} def get_vertex(self, node): if node in self.vertices:", "prio_dict class Graph: def __init__(self): self.vertices = {} def __str__(self): return str([key for", "self.vertices[src][dst] = weight def find_path(self, src, dst, path=None): if path is None: path", "= self.find_all_paths(v, dst, path) for p in temp: paths.append(p) return paths def find_shortest_path(self,", "+ [src] if src == dst: return path if src not in graph:", "graph: return None for v in graph[src]: if v not in path: temp", "self.vertices path = path + [src] if src == dst: return path if", "in path: temp = self.find_path(v, dst, path) if temp: return temp return None", "for v in graph[src]: if v not in path: temp = self.find_path(v, dst,", "= prio_dict.PriorityDictionary() Q[src] = 0 for v in Q: D[v] = Q[v] if", "Q = prio_dict.PriorityDictionary() Q[src] = 0 for v in Q: D[v] = Q[v]", "self.vertices.keys() def add_edge(self, src, dst, weight=0): if src not in self.vertices: self.add_vertex(src) if", "paths = [] for v in graph[src]: if v not in path: temp", "v not in path: temp = self.find_all_paths(v, dst, path) for p in temp:", "dst: return path if src not in graph: return None shortest_path = None", "[path] if src not in graph: return [] paths = [] for v", "src not in graph: return None shortest_path = None for v in graph[src]:", "True else: return True return False def Dijkstra(self, G, src, dst=None): D =", "v in graph[src]: if v not in path: temp = self.find_path(v, dst, path)", "temp: return temp return None def find_all_paths(self, src, dst, path=None): if path is", "vw_length P[w] = v return (D,P) def shortest_path(self, G, src, dst): D,P =", "node in self.vertices: return self.vertices[node] else: return None def get_vertices(self): return 
self.vertices.keys() def", "Q[src] = 0 for v in Q: D[v] = Q[v] if v ==", "= path + [src] if src == dst: return path if src not", "if src not in self.vertices: self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst]", "v in graph[src]: if v not in visited: if self.is_connected(visited, v): return True", "for v in Q: D[v] = Q[v] if v == dst: break for", "self.is_connected(visited, v): return True else: return True return False def Dijkstra(self, G, src,", "in path: temp = self.find_shortest_path(v, dst, path) if temp: if not shortest_path or", "D[v] = Q[v] if v == dst: break for w in G[v]: vw_length", "!= len(vertices): for v in graph[src]: if v not in visited: if self.is_connected(visited,", "def __str__(self): return str([key for key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def", "if src not in graph: return None shortest_path = None for v in", "is None: path = [] graph = self.vertices path = path + [src]", "= vw_length P[w] = v return (D,P) def shortest_path(self, G, src, dst): D,P", "path + [src] if src == dst: return [path] if src not in", "if v not in path: temp = self.find_all_paths(v, dst, path) for p in", "P[w] = v return (D,P) def shortest_path(self, G, src, dst): D,P = self.Dijkstra(G,", "Q[w] = vw_length P[w] = v return (D,P) def shortest_path(self, G, src, dst):", "src not in graph: return [] paths = [] for v in graph[src]:", "v): return True else: return True return False def Dijkstra(self, G, src, dst=None):", "src not in self.vertices: self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] =", "[] graph = self.vertices path = path + [src] if src == dst:", "not in path: temp = self.find_shortest_path(v, dst, path) if temp: if not shortest_path", "path: temp = self.find_shortest_path(v, dst, path) if temp: if not shortest_path or len(temp)", "visited = set() graph = self.vertices vertices = list(graph.keys()) if src is None:", "path if 
src not in graph: return None for v in graph[src]: if", "self.vertices path = path + [src] if src == dst: return [path] if", "temp: if not shortest_path or len(temp) < len(shortest_path): shortest_path = temp return shortest_path", "in Q: D[v] = Q[v] if v == dst: break for w in", "False def Dijkstra(self, G, src, dst=None): D = {} P = {} Q", "+ [src] if src == dst: return [path] if src not in graph:", "dst: break for w in G[v]: vw_length = D[v] + G[v][w] if w", "if v not in path: temp = self.find_shortest_path(v, dst, path) if temp: if", "return (D,P) def shortest_path(self, G, src, dst): D,P = self.Dijkstra(G, src, dst) path", "dst: return path if src not in graph: return None for v in", "None: src = vertices[0] visited.add(src) if len(visited) != len(vertices): for v in graph[src]:", "temp return None def find_all_paths(self, src, dst, path=None): if path is None: path", "if node in self.vertices: return self.vertices[node] else: return None def get_vertices(self): return self.vertices.keys()", "in graph[src]: if v not in path: temp = self.find_shortest_path(v, dst, path) if", "Q or vw_length < Q[w]: Q[w] = vw_length P[w] = v return (D,P)", "src not in graph: return None for v in graph[src]: if v not", "shortest_path def is_connected(self, visited=None, src=None): if visited is None: visited = set() graph", "self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src, dst, path=None): if path is None:", "graph = self.vertices path = path + [src] if src == dst: return", "if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src, dst,", "def Dijkstra(self, G, src, dst=None): D = {} P = {} Q =", "def get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst, weight=0): if src not in", "= {} Q = prio_dict.PriorityDictionary() Q[src] = 0 for v in Q: D[v]", "key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): 
self.vertices[node] = {}", "if v not in path: temp = self.find_path(v, dst, path) if temp: return", "= temp return shortest_path def is_connected(self, visited=None, src=None): if visited is None: visited", "return path if src not in graph: return None for v in graph[src]:", "graph = self.vertices vertices = list(graph.keys()) if src is None: src = vertices[0]", "dst, weight=0): if src not in self.vertices: self.add_vertex(src) if dst not in self.vertices:", "return None shortest_path = None for v in graph[src]: if v not in", "is_connected(self, visited=None, src=None): if visited is None: visited = set() graph = self.vertices", "< len(shortest_path): shortest_path = temp return shortest_path def is_connected(self, visited=None, src=None): if visited", "self.vertices vertices = list(graph.keys()) if src is None: src = vertices[0] visited.add(src) if", "if src == dst: return path if src not in graph: return None", "w in D: if vw_length < D[w]: raise ValueError elif w not in", "dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src, dst, path=None):", "= v return (D,P) def shortest_path(self, G, src, dst): D,P = self.Dijkstra(G, src,", "paths.append(p) return paths def find_shortest_path(self, src, dst, path=None): if path is None: path", "in self.vertices: return self.vertices[node] else: return None def get_vertices(self): return self.vertices.keys() def add_edge(self,", "def add_edge(self, src, dst, weight=0): if src not in self.vertices: self.add_vertex(src) if dst", "in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src, dst, path=None): if path", "if v not in visited: if self.is_connected(visited, v): return True else: return True", "vw_length < Q[w]: Q[w] = vw_length P[w] = v return (D,P) def shortest_path(self,", "None def get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst, weight=0): if src not", "dst, path) if temp: if not shortest_path or 
len(temp) < len(shortest_path): shortest_path =", "self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src, dst, path=None): if path is", "import prio_dict class Graph: def __init__(self): self.vertices = {} def __str__(self): return str([key", "for p in temp: paths.append(p) return paths def find_shortest_path(self, src, dst, path=None): if", "= [] graph = self.vertices path = path + [src] if src ==", "paths def find_shortest_path(self, src, dst, path=None): if path is None: path = []", "ValueError elif w not in Q or vw_length < Q[w]: Q[w] = vw_length", "Dijkstra(self, G, src, dst=None): D = {} P = {} Q = prio_dict.PriorityDictionary()", "not in Q or vw_length < Q[w]: Q[w] = vw_length P[w] = v", "not in self.vertices: self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight", "graph: return [] paths = [] for v in graph[src]: if v not", "in graph[src]: if v not in visited: if self.is_connected(visited, v): return True else:", "visited=None, src=None): if visited is None: visited = set() graph = self.vertices vertices", "v == dst: break for w in G[v]: vw_length = D[v] + G[v][w]", "set() graph = self.vertices vertices = list(graph.keys()) if src is None: src =", "dst): D,P = self.Dijkstra(G, src, dst) path = [] while True: path.append(dst) if", "None: path = [] graph = self.vertices path = path + [src] if", "if visited is None: visited = set() graph = self.vertices vertices = list(graph.keys())", "graph: return None shortest_path = None for v in graph[src]: if v not", "src == dst: return [path] if src not in graph: return [] paths", "find_path(self, src, dst, path=None): if path is None: path = [] graph =", "path) if temp: if not shortest_path or len(temp) < len(shortest_path): shortest_path = temp", "<gh_stars>0 import prio_dict class Graph: def __init__(self): self.vertices = {} def __str__(self): return", "G, src, dst): D,P = self.Dijkstra(G, src, dst) path = [] while True:", "in 
G[v]: vw_length = D[v] + G[v][w] if w in D: if vw_length", "Graph: def __init__(self): self.vertices = {} def __str__(self): return str([key for key in", "True return False def Dijkstra(self, G, src, dst=None): D = {} P =", "= self.find_path(v, dst, path) if temp: return temp return None def find_all_paths(self, src,", "raise ValueError elif w not in Q or vw_length < Q[w]: Q[w] =", "= {} def get_vertex(self, node): if node in self.vertices: return self.vertices[node] else: return", "return str([key for key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node):", "__iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] = {} def get_vertex(self, node): if", "src == dst: return path if src not in graph: return None shortest_path", "None for v in graph[src]: if v not in path: temp = self.find_shortest_path(v,", "path = [] graph = self.vertices path = path + [src] if src", "not in path: temp = self.find_all_paths(v, dst, path) for p in temp: paths.append(p)", "w not in Q or vw_length < Q[w]: Q[w] = vw_length P[w] =", "not in graph: return [] paths = [] for v in graph[src]: if", "= self.vertices vertices = list(graph.keys()) if src is None: src = vertices[0] visited.add(src)", "len(vertices): for v in graph[src]: if v not in visited: if self.is_connected(visited, v):", "D,P = self.Dijkstra(G, src, dst) path = [] while True: path.append(dst) if dst", "def find_shortest_path(self, src, dst, path=None): if path is None: path = [] graph", "in path: temp = self.find_all_paths(v, dst, path) for p in temp: paths.append(p) return", "def find_all_paths(self, src, dst, path=None): if path is None: path = [] graph", "get_vertex(self, node): if node in self.vertices: return self.vertices[node] else: return None def get_vertices(self):", "return self.vertices[node] else: return None def get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst,", "return paths def 
find_shortest_path(self, src, dst, path=None): if path is None: path =", "[src] if src == dst: return [path] if src not in graph: return", "v not in visited: if self.is_connected(visited, v): return True else: return True return", "vertices = list(graph.keys()) if src is None: src = vertices[0] visited.add(src) if len(visited)", "while True: path.append(dst) if dst == src: break dst = P[dst] path.reverse() return", "vertices[0] visited.add(src) if len(visited) != len(vertices): for v in graph[src]: if v not", "return None for v in graph[src]: if v not in path: temp =", "dst, path) for p in temp: paths.append(p) return paths def find_shortest_path(self, src, dst,", "for v in graph[src]: if v not in path: temp = self.find_shortest_path(v, dst,", "self.vertices: return self.vertices[node] else: return None def get_vertices(self): return self.vertices.keys() def add_edge(self, src,", "self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src,", "src, dst, path=None): if path is None: path = [] graph = self.vertices", "in temp: paths.append(p) return paths def find_shortest_path(self, src, dst, path=None): if path is", "path) if temp: return temp return None def find_all_paths(self, src, dst, path=None): if", "= {} def __str__(self): return str([key for key in self.vertices.keys()]) def __iter__(self): return", "find_shortest_path(self, src, dst, path=None): if path is None: path = [] graph =", "v in graph[src]: if v not in path: temp = self.find_shortest_path(v, dst, path)", "path = path + [src] if src == dst: return [path] if src", "[src] if src == dst: return path if src not in graph: return", "v not in path: temp = self.find_shortest_path(v, dst, path) if temp: if not", "G[v]: vw_length = D[v] + G[v][w] if w in D: if vw_length <", "vw_length = D[v] + G[v][w] if w in D: if vw_length < D[w]:", "if w in D: if vw_length < D[w]: raise ValueError elif w not", "def shortest_path(self, G, src, dst): D,P = 
self.Dijkstra(G, src, dst) path = []", "Q: D[v] = Q[v] if v == dst: break for w in G[v]:", "weight=0): if src not in self.vertices: self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst)", "= [] while True: path.append(dst) if dst == src: break dst = P[dst]", "self.vertices[node] = {} def get_vertex(self, node): if node in self.vertices: return self.vertices[node] else:", "src, dst, weight=0): if src not in self.vertices: self.add_vertex(src) if dst not in", "if path is None: path = [] graph = self.vertices path = path", "in graph: return [] paths = [] for v in graph[src]: if v", "def is_connected(self, visited=None, src=None): if visited is None: visited = set() graph =", "= 0 for v in Q: D[v] = Q[v] if v == dst:", "+ G[v][w] if w in D: if vw_length < D[w]: raise ValueError elif", "if temp: return temp return None def find_all_paths(self, src, dst, path=None): if path", "not in visited: if self.is_connected(visited, v): return True else: return True return False", "prio_dict.PriorityDictionary() Q[src] = 0 for v in Q: D[v] = Q[v] if v", "in Q or vw_length < Q[w]: Q[w] = vw_length P[w] = v return", "if vw_length < D[w]: raise ValueError elif w not in Q or vw_length", "return None def find_all_paths(self, src, dst, path=None): if path is None: path =", "return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] = {} def get_vertex(self, node): if node", "path: temp = self.find_all_paths(v, dst, path) for p in temp: paths.append(p) return paths", "D[v] + G[v][w] if w in D: if vw_length < D[w]: raise ValueError", "is None: visited = set() graph = self.vertices vertices = list(graph.keys()) if src", "src is None: src = vertices[0] visited.add(src) if len(visited) != len(vertices): for v", "return temp return None def find_all_paths(self, src, dst, path=None): if path is None:", "return False def Dijkstra(self, G, src, dst=None): D = {} P = {}", "shortest_path = None for v in graph[src]: if v not in path: temp", "not in graph: 
return None shortest_path = None for v in graph[src]: if", "v not in path: temp = self.find_path(v, dst, path) if temp: return temp", "[] while True: path.append(dst) if dst == src: break dst = P[dst] path.reverse()", "graph[src]: if v not in visited: if self.is_connected(visited, v): return True else: return", "D = {} P = {} Q = prio_dict.PriorityDictionary() Q[src] = 0 for", "self.vertices[node] else: return None def get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst, weight=0):", "= list(graph.keys()) if src is None: src = vertices[0] visited.add(src) if len(visited) !=", "temp: paths.append(p) return paths def find_shortest_path(self, src, dst, path=None): if path is None:", "not in graph: return None for v in graph[src]: if v not in", "< D[w]: raise ValueError elif w not in Q or vw_length < Q[w]:", "elif w not in Q or vw_length < Q[w]: Q[w] = vw_length P[w]", "path) for p in temp: paths.append(p) return paths def find_shortest_path(self, src, dst, path=None):", "len(temp) < len(shortest_path): shortest_path = temp return shortest_path def is_connected(self, visited=None, src=None): if", "for v in graph[src]: if v not in path: temp = self.find_all_paths(v, dst,", "path if src not in graph: return None shortest_path = None for v", "def get_vertex(self, node): if node in self.vertices: return self.vertices[node] else: return None def", "shortest_path = temp return shortest_path def is_connected(self, visited=None, src=None): if visited is None:", "str([key for key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node]", "= None for v in graph[src]: if v not in path: temp =", "self.find_shortest_path(v, dst, path) if temp: if not shortest_path or len(temp) < len(shortest_path): shortest_path", "temp return shortest_path def is_connected(self, visited=None, src=None): if visited is None: visited =", "in graph[src]: if v not in path: temp = self.find_path(v, dst, 
path) if", "visited: if self.is_connected(visited, v): return True else: return True return False def Dijkstra(self,", "shortest_path(self, G, src, dst): D,P = self.Dijkstra(G, src, dst) path = [] while", "def __init__(self): self.vertices = {} def __str__(self): return str([key for key in self.vertices.keys()])", "vw_length < D[w]: raise ValueError elif w not in Q or vw_length <", "temp = self.find_path(v, dst, path) if temp: return temp return None def find_all_paths(self,", "p in temp: paths.append(p) return paths def find_shortest_path(self, src, dst, path=None): if path", "== dst: break for w in G[v]: vw_length = D[v] + G[v][w] if", "= D[v] + G[v][w] if w in D: if vw_length < D[w]: raise", "add_vertex(self, node): self.vertices[node] = {} def get_vertex(self, node): if node in self.vertices: return", "__str__(self): return str([key for key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self,", "src=None): if visited is None: visited = set() graph = self.vertices vertices =", "if len(visited) != len(vertices): for v in graph[src]: if v not in visited:", "return self.vertices.keys() def add_edge(self, src, dst, weight=0): if src not in self.vertices: self.add_vertex(src)", "return True else: return True return False def Dijkstra(self, G, src, dst=None): D", "v in graph[src]: if v not in path: temp = self.find_all_paths(v, dst, path)", "dst, path=None): if path is None: path = [] graph = self.vertices path", "if src == dst: return [path] if src not in graph: return []", "dst, path) if temp: return temp return None def find_all_paths(self, src, dst, path=None):", "v in Q: D[v] = Q[v] if v == dst: break for w", "path = path + [src] if src == dst: return path if src", "dst: return [path] if src not in graph: return [] paths = []", "list(graph.keys()) if src is None: src = vertices[0] visited.add(src) if len(visited) != len(vertices):", "dst=None): D = {} P = {} Q = prio_dict.PriorityDictionary() Q[src] = 0", "if src 
not in graph: return [] paths = [] for v in", "visited is None: visited = set() graph = self.vertices vertices = list(graph.keys()) if", "return shortest_path def is_connected(self, visited=None, src=None): if visited is None: visited = set()", "True: path.append(dst) if dst == src: break dst = P[dst] path.reverse() return path", "temp = self.find_shortest_path(v, dst, path) if temp: if not shortest_path or len(temp) <", "not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self, src, dst, path=None): if", "node): self.vertices[node] = {} def get_vertex(self, node): if node in self.vertices: return self.vertices[node]", "def find_path(self, src, dst, path=None): if path is None: path = [] graph", "Q[v] if v == dst: break for w in G[v]: vw_length = D[v]", "in graph: return None shortest_path = None for v in graph[src]: if v", "__init__(self): self.vertices = {} def __str__(self): return str([key for key in self.vertices.keys()]) def", "= self.vertices path = path + [src] if src == dst: return [path]", "None: visited = set() graph = self.vertices vertices = list(graph.keys()) if src is", "if self.is_connected(visited, v): return True else: return True return False def Dijkstra(self, G,", "if not shortest_path or len(temp) < len(shortest_path): shortest_path = temp return shortest_path def", "len(shortest_path): shortest_path = temp return shortest_path def is_connected(self, visited=None, src=None): if visited is", "0 for v in Q: D[v] = Q[v] if v == dst: break", "node): if node in self.vertices: return self.vertices[node] else: return None def get_vertices(self): return", "== dst: return path if src not in graph: return None shortest_path =", "= self.Dijkstra(G, src, dst) path = [] while True: path.append(dst) if dst ==", "not in path: temp = self.find_path(v, dst, path) if temp: return temp return", "else: return None def get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst, weight=0): if", "{} P = {} Q = 
prio_dict.PriorityDictionary() Q[src] = 0 for v in", "in visited: if self.is_connected(visited, v): return True else: return True return False def", "is None: src = vertices[0] visited.add(src) if len(visited) != len(vertices): for v in", "return [path] if src not in graph: return [] paths = [] for", "src == dst: return path if src not in graph: return None for", "== dst: return path if src not in graph: return None for v", "if v == dst: break for w in G[v]: vw_length = D[v] +", "path: temp = self.find_path(v, dst, path) if temp: return temp return None def", "= {} P = {} Q = prio_dict.PriorityDictionary() Q[src] = 0 for v", "return None def get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst, weight=0): if src", "None shortest_path = None for v in graph[src]: if v not in path:", "{} def get_vertex(self, node): if node in self.vertices: return self.vertices[node] else: return None", "visited.add(src) if len(visited) != len(vertices): for v in graph[src]: if v not in", "not shortest_path or len(temp) < len(shortest_path): shortest_path = temp return shortest_path def is_connected(self,", "return path if src not in graph: return None shortest_path = None for", "= path + [src] if src == dst: return [path] if src not", "self.vertices: self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def find_path(self,", "src = vertices[0] visited.add(src) if len(visited) != len(vertices): for v in graph[src]: if", "graph[src]: if v not in path: temp = self.find_shortest_path(v, dst, path) if temp:", "[] for v in graph[src]: if v not in path: temp = self.find_all_paths(v,", "self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] = {} def get_vertex(self,", "None def find_all_paths(self, src, dst, path=None): if path is None: path = []", "w in G[v]: vw_length = D[v] + G[v][w] if w in D: if", "= self.vertices path = path + [src] if src == 
dst: return path", "if temp: if not shortest_path or len(temp) < len(shortest_path): shortest_path = temp return", "v return (D,P) def shortest_path(self, G, src, dst): D,P = self.Dijkstra(G, src, dst)", "None for v in graph[src]: if v not in path: temp = self.find_path(v,", "else: return True return False def Dijkstra(self, G, src, dst=None): D = {}", "shortest_path or len(temp) < len(shortest_path): shortest_path = temp return shortest_path def is_connected(self, visited=None,", "= self.find_shortest_path(v, dst, path) if temp: if not shortest_path or len(temp) < len(shortest_path):", "src, dst) path = [] while True: path.append(dst) if dst == src: break", "for w in G[v]: vw_length = D[v] + G[v][w] if w in D:", "in graph[src]: if v not in path: temp = self.find_all_paths(v, dst, path) for", "= weight def find_path(self, src, dst, path=None): if path is None: path =", "break for w in G[v]: vw_length = D[v] + G[v][w] if w in", "src, dst=None): D = {} P = {} Q = prio_dict.PriorityDictionary() Q[src] =", "class Graph: def __init__(self): self.vertices = {} def __str__(self): return str([key for key", "weight def find_path(self, src, dst, path=None): if path is None: path = []", "path + [src] if src == dst: return path if src not in", "in self.vertices: self.add_vertex(src) if dst not in self.vertices: self.add_vertex(dst) self.vertices[src][dst] = weight def", "dst) path = [] while True: path.append(dst) if dst == src: break dst", "== dst: return [path] if src not in graph: return [] paths =", "for key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] =", "temp = self.find_all_paths(v, dst, path) for p in temp: paths.append(p) return paths def", "in graph: return None for v in graph[src]: if v not in path:", "add_edge(self, src, dst, weight=0): if src not in self.vertices: self.add_vertex(src) if dst not", "= Q[v] if v == dst: break for w in G[v]: vw_length =", "self.vertices = {} def 
__str__(self): return str([key for key in self.vertices.keys()]) def __iter__(self):", "< Q[w]: Q[w] = vw_length P[w] = v return (D,P) def shortest_path(self, G,", "graph[src]: if v not in path: temp = self.find_all_paths(v, dst, path) for p", "iter(self.vertices.values()) def add_vertex(self, node): self.vertices[node] = {} def get_vertex(self, node): if node in", "in D: if vw_length < D[w]: raise ValueError elif w not in Q", "find_all_paths(self, src, dst, path=None): if path is None: path = [] graph =", "path=None): if path is None: path = [] graph = self.vertices path =", "self.Dijkstra(G, src, dst) path = [] while True: path.append(dst) if dst == src:", "get_vertices(self): return self.vertices.keys() def add_edge(self, src, dst, weight=0): if src not in self.vertices:", "if src not in graph: return None for v in graph[src]: if v", "{} def __str__(self): return str([key for key in self.vertices.keys()]) def __iter__(self): return iter(self.vertices.values())", "G[v][w] if w in D: if vw_length < D[w]: raise ValueError elif w" ]
[ "live_neighbors > 3): board[row][col] = 0 if copy_board[row][col] == 0 and live_neighbors ==", "(-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows = len(board) cols = len(board[0]) copy_board =", "neighbors: r = (row + neighbor[0]) c = (col + neighbor[1]) if (rows", "c = (col + neighbor[1]) if (rows > r >= 0) and (cols", "if copy_board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3): board[row][col]", "rows = len(board) cols = len(board[0]) copy_board = [[board[row][col] for col in range(cols)]", "= 0 if copy_board[row][col] == 0 and live_neighbors == 3: board[row][col] = 1", "= len(board[0]) copy_board = [[board[row][col] for col in range(cols)] for row in range(rows)]", "> c >= 0) and (copy_board[r][c] == 1): live_neighbors += 1 if copy_board[row][col]", "\"\"\" 1. Clarification 2. Possible solutions - Simulation v1 - Simulation optimised v2", "in range(cols)] for row in range(rows)] for row in range(rows): for col in", "= 1 # T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board: List[List[int]]) -> None:", "S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1),", "1 and (live_neighbors < 2 or live_neighbors > 3): board[row][col] = 0 if", "for row in range(rows): for col in range(cols): live_neighbors = 0 for neighbor", "col in range(cols)] for row in range(rows)] for row in range(rows): for col", "cols = len(board[0]) copy_board = [[board[row][col] for col in range(cols)] for row in", "board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1),", "(live_neighbors < 2 or live_neighbors > 3): board[row][col] = 0 if copy_board[row][col] ==", "== 1 and (live_neighbors < 2 or live_neighbors > 3): board[row][col] = 0", "(0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows = len(board) cols = len(board[0]) copy_board", "-> None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows", "for neighbor in neighbors: r = (row + neighbor[0]) c 
= (col +", "S=O(1) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1),", "neighbor[0]) c = (col + neighbor[1]) if (rows > r >= 0) and", "in neighbors: r = (row + neighbor[0]) c = (col + neighbor[1]) if", "row in range(rows)] for row in range(rows): for col in range(cols): live_neighbors =", "(1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows = len(board) cols = len(board[0])", "range(cols): live_neighbors = 0 for neighbor in neighbors: r = (row + neighbor[0])", "[(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows = len(board) cols =", "live_neighbors = 0 for neighbor in neighbors: r = (row + neighbor[0]) c", "+ neighbor[0]) c = (col + neighbor[1]) if (rows > r >= 0)", "class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1), (0,-1),", "and (copy_board[r][c] == 1): live_neighbors += 1 if copy_board[row][col] == 1 and (live_neighbors", "for row in range(rows)] for row in range(rows): for col in range(cols): live_neighbors", "2 or live_neighbors > 3): board[row][col] = 0 if copy_board[row][col] == 0 and", "+= 1 if copy_board[row][col] == 1 and (live_neighbors < 2 or live_neighbors >", "and (live_neighbors < 2 or live_neighbors > 3): board[row][col] = 0 if copy_board[row][col]", "for col in range(cols)] for row in range(rows)] for row in range(rows): for", "or live_neighbors > 3): board[row][col] = 0 if copy_board[row][col] == 0 and live_neighbors", "= [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows = len(board) cols", "in range(rows)] for row in range(rows): for col in range(cols): live_neighbors = 0", "solutions - Simulation v1 - Simulation optimised v2 3. Coding 4. 
Tests \"\"\"", "Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1),", "range(rows)] for row in range(rows): for col in range(cols): live_neighbors = 0 for", ">= 0) and (cols > c >= 0) and (copy_board[r][c] == 1): live_neighbors", "for col in range(cols): live_neighbors = 0 for neighbor in neighbors: r =", "(-1,1), (0,1), (1,1)] rows = len(board) cols = len(board[0]) copy_board = [[board[row][col] for", "len(board[0]) copy_board = [[board[row][col] for col in range(cols)] for row in range(rows)] for", "Possible solutions - Simulation v1 - Simulation optimised v2 3. Coding 4. Tests", "3): board[row][col] = 0 if copy_board[row][col] == 0 and live_neighbors == 3: board[row][col]", "(copy_board[r][c] == 1): live_neighbors += 1 if copy_board[row][col] == 1 and (live_neighbors <", "1): live_neighbors += 1 if copy_board[row][col] == 1 and (live_neighbors < 2 or", "(cols > c >= 0) and (copy_board[r][c] == 1): live_neighbors += 1 if", "T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0),", "Clarification 2. Possible solutions - Simulation v1 - Simulation optimised v2 3. 
Coding", "board[row][col] = 0 if copy_board[row][col] == 0 and live_neighbors == 3: board[row][col] =", "and (cols > c >= 0) and (copy_board[r][c] == 1): live_neighbors += 1", "0 and live_neighbors == 3: board[row][col] = 1 # T=O(m*n), S=O(1) class Solution:", "in range(rows): for col in range(cols): live_neighbors = 0 for neighbor in neighbors:", "Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]]) -> None:", "neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows = len(board)", "# T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors =", "r = (row + neighbor[0]) c = (col + neighbor[1]) if (rows >", "col in range(cols): live_neighbors = 0 for neighbor in neighbors: r = (row", "copy_board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3): board[row][col] =", "r >= 0) and (cols > c >= 0) and (copy_board[r][c] == 1):", "3. Coding 4. Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board:", "if (rows > r >= 0) and (cols > c >= 0) and", "== 3: board[row][col] = 1 # T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board:", "= (col + neighbor[1]) if (rows > r >= 0) and (cols >", "# T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors =", "1. Clarification 2. Possible solutions - Simulation v1 - Simulation optimised v2 3.", "0 if copy_board[row][col] == 0 and live_neighbors == 3: board[row][col] = 1 #", "(0,1), (1,1)] rows = len(board) cols = len(board[0]) copy_board = [[board[row][col] for col", "optimised v2 3. Coding 4. Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution: def", "1 # T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors", "4. 
Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]]) ->", "== 0 and live_neighbors == 3: board[row][col] = 1 # T=O(m*n), S=O(1) class", "None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)] rows =", "c >= 0) and (copy_board[r][c] == 1): live_neighbors += 1 if copy_board[row][col] ==", "0) and (copy_board[r][c] == 1): live_neighbors += 1 if copy_board[row][col] == 1 and", "List[List[int]]) -> None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]", "Coding 4. Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]])", "len(board) cols = len(board[0]) copy_board = [[board[row][col] for col in range(cols)] for row", "live_neighbors += 1 if copy_board[row][col] == 1 and (live_neighbors < 2 or live_neighbors", "(1,1)] rows = len(board) cols = len(board[0]) copy_board = [[board[row][col] for col in", "T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0),", "3: board[row][col] = 1 # T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board: List[List[int]])", "in range(cols): live_neighbors = 0 for neighbor in neighbors: r = (row +", "range(cols)] for row in range(rows)] for row in range(rows): for col in range(cols):", "Simulation optimised v2 3. Coding 4. Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution:", ">= 0) and (copy_board[r][c] == 1): live_neighbors += 1 if copy_board[row][col] == 1", "def gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0),", "range(rows): for col in range(cols): live_neighbors = 0 for neighbor in neighbors: r", "live_neighbors == 3: board[row][col] = 1 # T=O(m*n), S=O(1) class Solution: def gameOfLife(self,", "Simulation v1 - Simulation optimised v2 3. Coding 4. 
Tests \"\"\" # T=O(m*n),", "(col + neighbor[1]) if (rows > r >= 0) and (cols > c", "(rows > r >= 0) and (cols > c >= 0) and (copy_board[r][c]", "v2 3. Coding 4. Tests \"\"\" # T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self,", "neighbor[1]) if (rows > r >= 0) and (cols > c >= 0)", "gameOfLife(self, board: List[List[int]]) -> None: neighbors = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1),", "board[row][col] = 1 # T=O(m*n), S=O(1) class Solution: def gameOfLife(self, board: List[List[int]]) ->", "(row + neighbor[0]) c = (col + neighbor[1]) if (rows > r >=", "neighbor in neighbors: r = (row + neighbor[0]) c = (col + neighbor[1])", "(-1,0), (-1,1), (0,1), (1,1)] rows = len(board) cols = len(board[0]) copy_board = [[board[row][col]", "2. Possible solutions - Simulation v1 - Simulation optimised v2 3. Coding 4.", "- Simulation v1 - Simulation optimised v2 3. Coding 4. Tests \"\"\" #", "1 if copy_board[row][col] == 1 and (live_neighbors < 2 or live_neighbors > 3):", "> 3): board[row][col] = 0 if copy_board[row][col] == 0 and live_neighbors == 3:", "[[board[row][col] for col in range(cols)] for row in range(rows)] for row in range(rows):", "v1 - Simulation optimised v2 3. Coding 4. Tests \"\"\" # T=O(m*n), S=O(m*n)", "<reponame>woozway/py3-leetcode<gh_stars>1-10 \"\"\" 1. Clarification 2. Possible solutions - Simulation v1 - Simulation optimised", "row in range(rows): for col in range(cols): live_neighbors = 0 for neighbor in", "if copy_board[row][col] == 0 and live_neighbors == 3: board[row][col] = 1 # T=O(m*n),", "\"\"\" # T=O(m*n), S=O(m*n) class Solution: def gameOfLife(self, board: List[List[int]]) -> None: neighbors", "and live_neighbors == 3: board[row][col] = 1 # T=O(m*n), S=O(1) class Solution: def", "- Simulation optimised v2 3. Coding 4. 
Tests \"\"\" # T=O(m*n), S=O(m*n) class", "= (row + neighbor[0]) c = (col + neighbor[1]) if (rows > r", "copy_board[row][col] == 0 and live_neighbors == 3: board[row][col] = 1 # T=O(m*n), S=O(1)", "copy_board = [[board[row][col] for col in range(cols)] for row in range(rows)] for row", "= len(board) cols = len(board[0]) copy_board = [[board[row][col] for col in range(cols)] for", "> r >= 0) and (cols > c >= 0) and (copy_board[r][c] ==", "0) and (cols > c >= 0) and (copy_board[r][c] == 1): live_neighbors +=", "== 1): live_neighbors += 1 if copy_board[row][col] == 1 and (live_neighbors < 2", "0 for neighbor in neighbors: r = (row + neighbor[0]) c = (col", "< 2 or live_neighbors > 3): board[row][col] = 0 if copy_board[row][col] == 0", "+ neighbor[1]) if (rows > r >= 0) and (cols > c >=", "= 0 for neighbor in neighbors: r = (row + neighbor[0]) c =", "= [[board[row][col] for col in range(cols)] for row in range(rows)] for row in" ]
[ "Table from typing import List from pathlib import Path from dbxdeploy.package.Dependency import Dependency", "dependencies def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return", "if package[\"category\"] == \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) ->", "pathlib import Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator", "RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator:", "__init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter", "= dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self) -> list:", "tomlkit from tomlkit.items import Table from typing import List from pathlib import Path", "dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig", "Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig()", "package[\"category\"] == \"main\" and 
package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str:", "if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"] raise Exception(f\"Dependency {dependency_name} not found in poetry.lock\")", "import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import", "main_dependencies = self.__load_main_dependencies() dependencies = [] for dependency in main_dependencies: dependency_name = dependency[0]", "project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies = [] for dependency in", "for package in config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies:", "from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class", "in main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def", "requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path:", "-> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return 
list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def", "dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ):", "and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str: for dependency in", "[package for package in config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self,", "dependency in dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"] raise Exception(f\"Dependency {dependency_name} not", "Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig", "self.__load_main_dependencies() dependencies = [] for dependency in main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name,", "tomlkit.parse(f.read()) return [package for package in config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]]", "requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\")", "config = tomlkit.parse(f.read()) return [package for package in config[\"package\"] if package[\"category\"] == \"main\"", "def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies", "= [] for dependency in main_dependencies: dependency_name = dependency[0] dependencies.append( 
Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name))", "in dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"] raise Exception(f\"Dependency {dependency_name} not found", "RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path) ->", "import Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import", "poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies = [] for", "self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path", "self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def", "in config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name:", "dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self) ->", "dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self) -> list: requirements_config", 
"dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader:", "requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with", "RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir:", "self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies", "dependencies = [] for dependency in main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies,", "lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return [package", "-> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies =", "lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return [package for package in config[\"package\"] if", "-> List[Table]: with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return [package for package", "__find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str: for dependency in dependencies: if dependency[\"name\"].lower()", "dependencies: List[Table], dependency_name: str) -> str: for dependency 
in dependencies: if dependency[\"name\"].lower() ==", "typing import List from pathlib import Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter", "Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies", "= tomlkit.parse(f.read()) return [package for package in config[\"package\"] if package[\"category\"] == \"main\" and", "__load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return", "project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies()", "return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as f:", "import tomlkit from tomlkit.items import Table from typing import List from pathlib import", "RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) ->", "Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator", "load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies 
=", "for dependency in main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return", "class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator", "with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return [package for package in config[\"package\"]", "= RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path)", "self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as", "RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__(", "= self.__load_main_dependencies() dependencies = [] for dependency in main_dependencies: dependency_name = dependency[0] dependencies.append(", "dependency_name)) ) return dependencies def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt", "List[Table]: with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return [package for package in", "[] for dependency in main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) )", 
"str) -> str: for dependency in dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"]", "package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str: for dependency in dependencies:", "list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as f: config", "f: config = tomlkit.parse(f.read()) return [package for package in config[\"package\"] if package[\"category\"] ==", "__load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines()))", "== \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str: for", "<reponame>Kukuksumusu/dbx-deploy import tomlkit from tomlkit.items import Table from typing import List from pathlib", "import List from pathlib import Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import", "import RequirementsConfig class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator", "= self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\")", "requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as f: config =", 
"self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies = [] for dependency in main_dependencies: dependency_name =", "tomlkit.items import Table from typing import List from pathlib import Path from dbxdeploy.package.Dependency", "\"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str: for dependency", "from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from", "main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self)", "import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter,", "= requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path =", "requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path)", "RequirementsConfig class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator =", "def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]: with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read())", "= project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) 
main_dependencies = self.__load_main_dependencies() dependencies = [] for dependency", "requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path)", ") return dependencies def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt =", "from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator,", "import Table from typing import List from pathlib import Path from dbxdeploy.package.Dependency import", "List[Table], dependency_name: str) -> str: for dependency in dependencies: if dependency[\"name\"].lower() == dependency_name.lower():", "from pathlib import Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter from", "config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str)", "def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse,", "def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter =", "dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__( self, 
requirements_line_converter:", "requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self,", "dependency in main_dependencies: dependency_name = dependency[0] dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies", "from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def __init__( self,", "dependencies.append( Dependency(dependency_name, self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self) -> list: requirements_config =", "requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self, lockfile_path: Path) -> List[Table]:", "return [package for package in config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]] def", "str: for dependency in dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"] raise Exception(f\"Dependency", "): self.__requirements_generator = requirements_generator self.__requirements_line_converter = requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]:", "def __find_poetry_lock_version_by_name(self, dependencies: List[Table], dependency_name: str) -> str: for dependency in dependencies: if", "from typing import List from pathlib import Path from dbxdeploy.package.Dependency import Dependency from", "list: requirements_config = RequirementsConfig() 
requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config) return list(map(self.__requirements_line_converter.parse, requirements_txt.splitlines())) def __load_poetry_lock_dependencies(self,", "package in config[\"package\"] if package[\"category\"] == \"main\" and package[\"name\"]] def __find_poetry_lock_version_by_name(self, dependencies: List[Table],", "dependency_name: str) -> str: for dependency in dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return", "from tomlkit.items import Table from typing import List from pathlib import Path from", "return dependencies def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info() requirements_txt = self.__requirements_generator.generate(requirements_config)", "PackageDependencyLoader: def __init__( self, requirements_line_converter: RequirementsLineConverter, requirements_generator: RequirementsGenerator, ): self.__requirements_generator = requirements_generator self.__requirements_line_converter", "List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies = []", "import RequirementsLineConverter from dbxdeploy.package.RequirementsGenerator import RequirementsGenerator from dbxdeploy.package.RequirementsConfig import RequirementsConfig class PackageDependencyLoader: def", "= requirements_line_converter def load(self, project_base_dir: Path) -> List[Dependency]: poetry_lock_path = project_base_dir.joinpath(\"poetry.lock\") poetry_lock_dependencies =", "for dependency in dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"] raise Exception(f\"Dependency {dependency_name}", "-> str: for dependency in dependencies: if dependency[\"name\"].lower() == 
dependency_name.lower(): return dependency[\"version\"] raise", "dependencies: if dependency[\"name\"].lower() == dependency_name.lower(): return dependency[\"version\"] raise Exception(f\"Dependency {dependency_name} not found in", "poetry_lock_dependencies = self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies = [] for dependency in main_dependencies:", "as f: config = tomlkit.parse(f.read()) return [package for package in config[\"package\"] if package[\"category\"]", "self.__find_poetry_lock_version_by_name(poetry_lock_dependencies, dependency_name)) ) return dependencies def __load_main_dependencies(self) -> list: requirements_config = RequirementsConfig() requirements_config.exclude_index_info()", "Path) -> List[Table]: with lockfile_path.open(\"r\") as f: config = tomlkit.parse(f.read()) return [package for", "= self.__load_poetry_lock_dependencies(poetry_lock_path) main_dependencies = self.__load_main_dependencies() dependencies = [] for dependency in main_dependencies: dependency_name", "List from pathlib import Path from dbxdeploy.package.Dependency import Dependency from dbxdeploy.package.RequirementsLineConverter import RequirementsLineConverter" ]
[ "0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions. \"\"\" if self._oracle_type ==", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "been implemented.\") def reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions()", "the empirical game and corresponding output mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return", "initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle,", "self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def", "get_merged_policy(self): \"\"\" Return the output merged policy. Equivalent to merge policies and weights", "= num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy", "self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method", "**kwargs): \"\"\" Initialize the MFG Trainer. :param mfg_game: a mean-field game. :param oracle_type:", "loop (finding BR target based on the empirical game) if needed. :param initial_policies:", "or \"DQN\" RL approximate best response. :param num_inner_iters: the number of iterations for", "for the inner loop (finding BR target based on the empirical game) if", "this file except in compliance with the License. 
# You may obtain a", "policies and corresponding distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions =", "Trainer. :param mfg_game: a mean-field game. :param oracle_type: \"BR\" exact best response or", "Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0", "empirical game) if needed. :param initial_policies: initial policies. Uniform policies by default. :param", "game and corresponding output mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights", "ANY KIND, either express or implied. # See the License for the specific", "distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif", "the empirical game) if needed. :param initial_policies: initial policies. Uniform policies by default.", "= inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies", "\"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies", "game) if needed. :param initial_policies: initial policies. Uniform policies by default. :param meta_strategy_method:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "the License. \"\"\" The trainer of EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms", "self._output_policy = None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding", "iteration(self): \"\"\" Main training iteration. 
\"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop()", "output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original", "def update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies to the empirical game. :param", "2019 DeepMind Technologies Ltd. All rights reserved. # # Licensed under the Apache", "Uniform policies by default. :param meta_strategy_method: method for the inner loop. \"\"\" self._mfg_game", "reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "induced by inner loop. :return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "(EGTA) for MFGs. \"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\"", ":param initial_policies: initial policies. Uniform policies by default. :param meta_strategy_method: method for the", "weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self): return self._policies def get_distrbutions(self): return", "\"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters", "None def iteration(self): \"\"\" Main training iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy", "self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of all generated policies. 
\"\"\" self._meta_strategy_method.reset()", "OF ANY KIND, either express or implied. # See the License for the", "= None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions.", "meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical", "training iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self):", "def iteration(self): \"\"\" Main training iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy =", "updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None", "meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0", "Adding new best-response policies to the empirical game. :param output_merged_policy: a merged policy", "output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies in the empirical", "the inner loop. 
\"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players()", "open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA", "self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies to the empirical game.", "under the License. \"\"\" The trainer of EGTA for mean field game.\"\"\" from", "are being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy", "the inner loop (finding BR target based on the empirical game) if needed.", "implemented.\") def reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method", "Main training iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def", "= self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies in", ":param mfg_game: a mean-field game. :param oracle_type: \"BR\" exact best response or \"DQN\"", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "the number of iterations for the inner loop (finding BR target based on", "in the empirical game and corresponding output mixed strategies. 
\"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies()", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "number of iterations for the inner loop (finding BR target based on the", "= self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies to the empirical", "policy. Equivalent to merge policies and weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def", "and dist are being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop =", "distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return", "has not been implemented.\") def reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter =", "strategies. 
\"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\" Return the", "self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies in the empirical game and", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\" Main training", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\"", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "== \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle has not been implemented.\") def", "empirical game. :param output_merged_policy: a merged policy induced by inner loop. :return: \"\"\"", "default. :param meta_strategy_method: method for the inner loop. \"\"\" self._mfg_game = mfg_game self._oracle_type", "\"\"\" Adding new best-response policies to the empirical game. :param output_merged_policy: a merged", "required by applicable law or agreed to in writing, software # distributed under", "loop. :return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game,", "by inner loop. 
:return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution)", "applicable law or agreed to in writing, software # distributed under the License", "best response. :param num_inner_iters: the number of iterations for the inner loop (finding", "open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object):", "game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop", "or agreed to in writing, software # distributed under the License is distributed", "mfg_game: a mean-field game. :param oracle_type: \"BR\" exact best response or \"DQN\" RL", "def get_merged_policy(self): \"\"\" Return the output merged policy. Equivalent to merge policies and", "get_original_policies_and_weights(self): \"\"\" Return original policies in the empirical game and corresponding output mixed", "dist are being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method)", "+= 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self):", "\"BR\" exact best response or \"DQN\" RL approximate best response. :param num_inner_iters: the", "= None def iteration(self): \"\"\" Main training iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset()", "self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter", "specific language governing permissions and # limitations under the License. \"\"\" The trainer", "oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "policies by default. :param meta_strategy_method: method for the inner loop. \"\"\" self._mfg_game =", "writing, software # distributed under the License is distributed on an \"AS IS\"", "for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from", "def reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method =", "Reset the trainer. 
\"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions,", "# limitations under the License. \"\"\" The trainer of EGTA for mean field", "__init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer. :param", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. # You may obtain a copy of the License at # #", "target based on the empirical game) if needed. :param initial_policies: initial policies. Uniform", "self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError else: raise", "(finding BR target based on the empirical game) if needed. :param initial_policies: initial", "self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method)", "self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError else:", "distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle", "output_merged_policy): \"\"\" Adding new best-response policies to the empirical game. 
:param output_merged_policy: a", "to merge policies and weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self): return", "compliance with the License. # You may obtain a copy of the License", "the MFG Trainer. :param mfg_game: a mean-field game. :param oracle_type: \"BR\" exact best", "policy and dist are being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop", "open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\"", "\"\"\" Return original policies in the empirical game and corresponding output mixed strategies.", "greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies", "self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis", "\"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG", "init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\" def __init__(self,", "oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer. :param mfg_game: a", "inner loop. 
:return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of all generated", "limitations under the License. \"\"\" The trainer of EGTA for mean field game.\"\"\"", "import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA)", "not use this file except in compliance with the License. # You may", "of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\"", "mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA", "method for the inner loop. \"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players", "License, Version 2.0 (the \"License\"); # you may not use this file except", "MFG Trainer. :param mfg_game: a mean-field game. :param oracle_type: \"BR\" exact best response", "oracle_type: \"BR\" exact best response or \"DQN\" RL approximate best response. :param num_inner_iters:", "\"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\" Return the output", "Copyright 2019 DeepMind Technologies Ltd. All rights reserved. # # Licensed under the", "best-response policies to the empirical game. 
:param output_merged_policy: a merged policy induced by", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"\"\" Return the output merged policy. Equivalent to merge policies and weights from", "iterations for the inner loop (finding BR target based on the empirical game)", "num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer. :param mfg_game: a mean-field", "# you may not use this file except in compliance with the License.", "mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\" Return", "agreed to in writing, software # distributed under the License is distributed on", ":return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi))", "if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type ==", "(the \"License\"); # you may not use this file except in compliance with", "Return the output merged policy. Equivalent to merge policies and weights from get_original_policies_and_weights().", "= meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self):", ":param meta_strategy_method: method for the inner loop. \"\"\" self._mfg_game = mfg_game self._oracle_type =", ":param output_merged_policy: a merged policy induced by inner loop. 
:return: \"\"\" output_distribution =", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer. :param mfg_game: a mean-field game.", "game. :param oracle_type: \"BR\" exact best response or \"DQN\" RL approximate best response.", "new best-response policies to the empirical game. :param output_merged_policy: a merged policy induced", "import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import", "file except in compliance with the License. # You may obtain a copy", "initial_policies: initial policies. Uniform policies by default. :param meta_strategy_method: method for the inner", "from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic", "distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0 def initialize_policies_and_distributions(self):", "class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\" def __init__(self, mfg_game,", "update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies to the empirical game. 
:param output_merged_policy:", "License for the specific language governing permissions and # limitations under the License.", "def get_original_policies_and_weights(self): \"\"\" Return original policies in the empirical game and corresponding output", "to in writing, software # distributed under the License is distributed on an", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "\"\"\" Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None,", "field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import", "game-theoretic analysis (EGTA) for MFGs. \"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\",", "loop. \"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters =", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer. 
:param mfg_game:", "\"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def", "self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies in the empirical game", "The trainer of EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from", "Final analysis of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self,", "inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for", "trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop", "self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and dist", "or implied. # See the License for the specific language governing permissions and", "self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions()", "self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\" Return the output merged policy. 
Equivalent", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "policies in the empirical game and corresponding output mixed strategies. \"\"\" weights =", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "BR target based on the empirical game) if needed. :param initial_policies: initial policies.", "the empirical game. :param output_merged_policy: a merged policy induced by inner loop. :return:", "= oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "a mean-field game. :param oracle_type: \"BR\" exact best response or \"DQN\" RL approximate", "DeepMind Technologies Ltd. All rights reserved. # # Licensed under the Apache License,", "inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\" Main training iteration. \"\"\" self._current_outer_iter +=", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "by default. :param meta_strategy_method: method for the inner loop. 
\"\"\" self._mfg_game = mfg_game", "num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\" Main training iteration.", "0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy =", "language governing permissions and # limitations under the License. \"\"\" The trainer of", "original policies in the empirical game and corresponding output mixed strategies. \"\"\" weights", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class", "\"\"\" Initialize the MFG Trainer. :param mfg_game: a mean-field game. :param oracle_type: \"BR\"", "analysis of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy):", "policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\" Main", "self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions. \"\"\" if", "greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies in the empirical game and corresponding", "and # limitations under the License. 
\"\"\" The trainer of EGTA for mean", "Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None,", "to the empirical game. :param output_merged_policy: a merged policy induced by inner loop.", "\"\"\" Main training iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy)", "self._policies, weights def get_merged_policy(self): \"\"\" Return the output merged policy. Equivalent to merge", "use this file except in compliance with the License. # You may obtain", "for the specific language governing permissions and # limitations under the License. \"\"\"", "= meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter =", "output merged policy. Equivalent to merge policies and weights from get_original_policies_and_weights(). \"\"\" return", "permissions and # limitations under the License. \"\"\" The trainer of EGTA for", "oracle has not been implemented.\") def reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter", "self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies to", "= 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions. \"\"\" if self._oracle_type", "self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested", "#TODO: check if policy and dist are being updated. 
self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies,", "self._output_policy = None def iteration(self): \"\"\" Main training iteration. \"\"\" self._current_outer_iter += 1", "= 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy", "import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\"", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game,", "mean-field game. :param oracle_type: \"BR\" exact best response or \"DQN\" RL approximate best", "trainer of EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA", "return self._policies, weights def get_merged_policy(self): \"\"\" Return the output merged policy. Equivalent to", "on the empirical game) if needed. :param initial_policies: initial policies. Uniform policies by", "2.0 (the \"License\"); # you may not use this file except in compliance", "corresponding output mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self):", "governing permissions and # limitations under the License. 
\"\"\" The trainer of EGTA", "the specific language governing permissions and # limitations under the License. \"\"\" The", "from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self): return self._policies def get_distrbutions(self): return self._distributions", "response. :param num_inner_iters: the number of iterations for the inner loop (finding BR", "and weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self): return self._policies def get_distrbutions(self):", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "\"\"\" Initialize policies and corresponding distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies,", "all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding", ":param num_inner_iters: the number of iterations for the inner loop (finding BR target", "the output merged policy. Equivalent to merge policies and weights from get_original_policies_and_weights(). \"\"\"", "# # Unless required by applicable law or agreed to in writing, software", "express or implied. # See the License for the specific language governing permissions", "mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy", "import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\" def", "either express or implied. # See the License for the specific language governing", "inner loop (finding BR target based on the empirical game) if needed. 
:param", "== \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise", "policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0 def", "self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\" Return original policies in the", "meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and dist are being updated. self._meta_strategy_method =", "def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions. \"\"\" if self._oracle_type == \"BR\":", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "and corresponding distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game,", "generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new", "output mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\"", "= inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\" Main training iteration. \"\"\" self._current_outer_iter", "policy induced by inner loop. 
:return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi =", "Technologies Ltd. All rights reserved. # # Licensed under the Apache License, Version", "a merged policy induced by inner loop. :return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy)", "else: raise ValueError(\"Suggested oracle has not been implemented.\") def reset(self): \"\"\" Reset the", "weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\" Return the output merged", "MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for MFGs. \"\"\" def __init__(self, mfg_game, oracle_type,", "final_step(self): \"\"\" Final analysis of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop()", "the License. # You may obtain a copy of the License at #", "merge policies and weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self): return self._policies", "EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies", "weights def get_merged_policy(self): \"\"\" Return the output merged policy. Equivalent to merge policies", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "check if policy and dist are being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis (EGTA) for MFGs.", "if policy and dist are being updated. 
self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters)", "iteration. \"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\"", "approximate best response. :param num_inner_iters: the number of iterations for the inner loop", "for the inner loop. \"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players =", "self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None", "= self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of all generated policies. \"\"\"", "with the License. # You may obtain a copy of the License at", "# Copyright 2019 DeepMind Technologies Ltd. All rights reserved. # # Licensed under", "= self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def get_merged_policy(self): \"\"\" Return the output merged policy.", "empirical game and corresponding output mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies,", ":param oracle_type: \"BR\" exact best response or \"DQN\" RL approximate best response. 
:param", "self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "= init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle", "\"\"\" self._current_outer_iter += 1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final", "of iterations for the inner loop (finding BR target based on the empirical", "\"\"\" Reset the trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies,", "the trainer. \"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters)", "rights reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "merged policy. Equivalent to merge policies and weights from get_original_policies_and_weights(). 
\"\"\" return self._output_policy", "init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle has", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "= mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO:", "\"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle has not been implemented.\") def reset(self):", "ValueError(\"Suggested oracle has not been implemented.\") def reset(self): \"\"\" Reset the trainer. \"\"\"", "\"\"\" self._current_outer_iter = 0 self.initialize_policies_and_distributions() self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[self._meta_strategy_method](mfg_game=self._mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=self._num_inner_iters) self._inner_loop =", "= distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi = self._oracle(self._mfg_game, output_distribution) self._policies.append(greedy_pi) self._distributions.append(distribution.DistributionPolicy(self._mfg_game, greedy_pi)) def get_original_policies_and_weights(self): \"\"\"", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Return original policies in the empirical game and corresponding output mixed strategies. \"\"\"", "meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer. :param mfg_game: a mean-field game. :param", "policies and weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self): return self._policies def", "exact best response or \"DQN\" RL approximate best response. 
:param num_inner_iters: the number", "def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the MFG Trainer.", "needed. :param initial_policies: initial policies. Uniform policies by default. :param meta_strategy_method: method for", "from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import meta_strategies from open_spiel.python.mfg.algorithms.EGTA import inner_loop from", "raise ValueError(\"Suggested oracle has not been implemented.\") def reset(self): \"\"\" Reset the trainer.", "num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\"", "License. \"\"\" The trainer of EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import", "in compliance with the License. # You may obtain a copy of the", "of EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution from open_spiel.python.mfg.algorithms.EGTA import", "MFGs. \"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "Initialize the MFG Trainer. :param mfg_game: a mean-field game. :param oracle_type: \"BR\" exact", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize", "policies. 
\"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new best-response", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "meta_strategy_method: method for the inner loop. \"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type", "See the License for the specific language governing permissions and # limitations under", "\"DQN\" RL approximate best response. :param num_inner_iters: the number of iterations for the", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "not been implemented.\") def reset(self): \"\"\" Reset the trainer. \"\"\" self._current_outer_iter = 0", "\"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Initialize policies and corresponding distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions", "1 self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of all", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "self._oracle_type == \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle has not been implemented.\")", "inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and", "for MFGs. 
\"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs): \"\"\" Initialize", "def final_step(self): \"\"\" Final analysis of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy =", "Version 2.0 (the \"License\"); # you may not use this file except in", "analysis (EGTA) for MFGs. \"\"\" def __init__(self, mfg_game, oracle_type, num_inner_iters=None, initial_policy=None, meta_strategy_method=\"nash\", **kwargs):", "except in compliance with the License. # You may obtain a copy of", "raise NotImplementedError else: raise ValueError(\"Suggested oracle has not been implemented.\") def reset(self): \"\"\"", "being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game, policies=self._policies, distributions=self._distributions, num_iterations=num_inner_iters) self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy =", "self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and dist are being updated.", "RL approximate best response. :param num_inner_iters: the number of iterations for the inner", "self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy", "= meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and dist are being updated. self._meta_strategy_method", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "merged policy induced by inner loop. 
:return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game, output_merged_policy) greedy_pi", "mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check", "elif self._oracle_type == \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle has not been", "inner loop. \"\"\" self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters", "output_merged_policy: a merged policy induced by inner loop. :return: \"\"\" output_distribution = distribution.DistributionPolicy(self._mfg_game,", "self._inner_loop = inner_loop.InnerLoop(self._meta_strategy_method) self._output_policy = None def iteration(self): \"\"\" Main training iteration. \"\"\"", "= mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy =", "if needed. :param initial_policies: initial policies. Uniform policies by default. :param meta_strategy_method: method", "corresponding distributions. \"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy)", "game. :param output_merged_policy: a merged policy induced by inner loop. :return: \"\"\" output_distribution", "and corresponding output mixed strategies. \"\"\" weights = self._meta_strategy_method.get_weights_on_orig_policies() return self._policies, weights def", "policies. Uniform policies by default. :param meta_strategy_method: method for the inner loop. \"\"\"", "NotImplementedError else: raise ValueError(\"Suggested oracle has not been implemented.\") def reset(self): \"\"\" Reset", "policies to the empirical game. 
:param output_merged_policy: a merged policy induced by inner", "Equivalent to merge policies and weights from get_original_policies_and_weights(). \"\"\" return self._output_policy def get_policies(self):", "initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and dist are being", "self._output_policy = self._inner_loop.run_inner_loop() self.update_policies(self._output_policy) def final_step(self): \"\"\" Final analysis of all generated policies.", "\"\"\" The trainer of EGTA for mean field game.\"\"\" from open_spiel.python.mfg.algorithms import distribution", "\"\"\" Final analysis of all generated policies. \"\"\" self._meta_strategy_method.reset() self._output_policy = self._inner_loop.run_inner_loop() def", "best response or \"DQN\" RL approximate best response. :param num_inner_iters: the number of", "num_inner_iters: the number of iterations for the inner loop (finding BR target based", "initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\": raise NotImplementedError else: raise ValueError(\"Suggested oracle has not", "based on the empirical game) if needed. :param initial_policies: initial policies. 
Uniform policies", "self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type == \"DQN\":", "self._output_policy = self._inner_loop.run_inner_loop() def update_policies(self, output_merged_policy): \"\"\" Adding new best-response policies to the", "open_spiel.python.mfg.algorithms.EGTA import inner_loop from open_spiel.python.mfg.algorithms.EGTA import init_oracle class MFGMetaTrainer(object): \"\"\" Empirical game-theoretic analysis", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "num_inner_iters self._initial_policy = initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and", "\"\"\" if self._oracle_type == \"BR\": self._oracle, self._policies, self._distributions = init_oracle.init_br_oracle(game=self._mfg_game, initial_policy=self._initial_policy) elif self._oracle_type", "None self._current_outer_iter = 0 def initialize_policies_and_distributions(self): \"\"\" Initialize policies and corresponding distributions. \"\"\"", "response or \"DQN\" RL approximate best response. :param num_inner_iters: the number of iterations", "self.initialize_policies_and_distributions() #TODO: check if policy and dist are being updated. self._meta_strategy_method = meta_strategies.MFG_META_STRATEGY_METHODS[meta_strategy_method](mfg_game=mfg_game,", "self._mfg_game = mfg_game self._oracle_type = oracle_type self._num_players = mfg_game.num_players() self._num_inner_iters = num_inner_iters self._initial_policy", "= initial_policy self._meta_strategy_method = meta_strategy_method self.initialize_policies_and_distributions() #TODO: check if policy and dist are", "initial policies. Uniform policies by default. :param meta_strategy_method: method for the inner loop." ]
[]
[ "{'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name", "lcs of the simulation. \"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext == '.pkl':", "name : str Simulation name. formats : np.array(str) List of files fopprmats to", "fit_res, directory, sim_meta={}): \"\"\"Write fit into a fits file. Parameters ---------- sim_lcs_meta :", "MName = sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb',", "name = pkl_dic['name'] header = pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow: table", "= v df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table", "file_ext = os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path + file_ext, 'rb') as", "np.array(str) List of files fopprmats to write. header : dict The simulation header.", "json_pyarrow = False from . import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self,", "2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan)", "+ '.pkl', 'wb') as file: pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs,", "warnings import pickle import pandas as pd import numpy as np try: import", "header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet') elif 'parquet' in", "\"\"\"Read a sim file. Parameters ---------- file_path : str Path of the file.", "a fits file. 
Parameters ---------- sim_lcs_meta : dict{list} Meta data of all lightcurves.", "!= 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:,", "file Returns ------- pandas.DataFrame The fit results. \"\"\" table = pq.read_table(file) fit =", "v in sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header", "lcs as pickle if 'pkl' in formats: with open(wpath + name + '.pkl',", "simulation. \"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path +", "sim_lcs_meta : dict{list} Meta data of all lightcurves. fit_res : list(sncosmo.utils.Result) List of", "fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5]", ".parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file. Parameters ---------- file_path :", "Returns ------- None Just write files. \"\"\" # Export lcs as pickle if", "write a file. \"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof']", "write. header : dict The simulation header. data : pandas.DataFrame Dataframe containing lcs.", "name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json module", "import pandas as pd import numpy as np try: import json import pyarrow", "def read_sim_file(file_path): \"\"\"Read a sim file. Parameters ---------- file_path : str Path of", "True except ImportError: json_pyarrow = False from . 
import salt_utils as salt_ut class", "k, v in sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data) table = pa.Table.from_pandas(df)", "table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json module to write", ": pandas.DataFrame Dataframe containing lcs. Returns ------- None Just write files. \"\"\" #", "file : str Fit results parquet file Returns ------- pandas.DataFrame The fit results.", "= salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c'])", "pandas.DataFrame The fit results. \"\"\" table = pq.read_table(file) fit = table.to_pandas() fit.attrs =", "file. \"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName =", "'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys:", "class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating):", "2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof'])", "file. Parameters ---------- sim_lcs_meta : dict{list} Meta data of all lightcurves. 
fit_res :", "sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1',", "= {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet'", "cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()}", "try: import json import pyarrow as pa import pyarrow.parquet as pq json_pyarrow =", "= snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0]))", "data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data) table", "of the simulation. \"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext == '.pkl': with", "Returns ------- None Just write a file. \"\"\" data = sim_lcs_meta.copy() fit_keys =", "write file. sim_meta : dict General simulation meta data. Returns ------- None Just", "dict{list} Meta data of all lightcurves. fit_res : list(sncosmo.utils.Result) List of sncosmo fit", "containing lcs. Returns ------- None Just write files. \"\"\" # Export lcs as", "'ndof'] MName = sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0',", "List of sncosmo fit results for each lightcurve. directory : str Destination of", "= pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode():", "the lcs of the simulation. \"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext ==", "path where to write file. name : str Simulation name. 
formats : np.array(str)", "'wb') as file: pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header}", "str, dict, pandas.DataFrame The name, the header and the lcs of the simulation.", "The name, the header and the lcs of the simulation. \"\"\" file_path, file_ext", "+ name + '.pkl', 'wb') as file: pkl_dic = {'name': name, 'lcs': data.to_dict(),", "and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder)", "write parquet formats\", UserWarning) return name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}):", "file. name : str Simulation name. formats : np.array(str) List of files fopprmats", "data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov", "UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file. Parameters ---------- file_path : str Path", "False from . import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if", "header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs", "snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in", "data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys:", "MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c',", "json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta", "Parameters ---------- wpath : str The path where to write file. 
name :", "pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if", "+ file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val for", "inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header'] elif file_ext ==", "data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2])", "int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return", "pyarrow.parquet as pq json_pyarrow = True except ImportError: json_pyarrow = False from .", "a sim file. Parameters ---------- file_path : str Path of the file. Returns", "into a fits file. Parameters ---------- sim_lcs_meta : dict{list} Meta data of all", "pq json_pyarrow = True except ImportError: json_pyarrow = False from . import salt_utils", "table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()}", "of all lightcurves. fit_res : list(sncosmo.utils.Result) List of sncosmo fit results for each", "elif file_ext == '.parquet': if json_pyarrow: table = pq.read_table(file_path + file_ext) lcs =", "the simulation. \"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path", "file. sim_meta : dict General simulation meta data. Returns ------- None Just write", "fit results. \"\"\" table = pq.read_table(file) fit = table.to_pandas() fit.attrs = json.loads(table.schema.metadata['header'.encode()]) fit.set_index(['ID'],", "name, formats, header, data): \"\"\"Write simulated lcs. 
Parameters ---------- wpath : str The", ": str Fit results parquet file Returns ------- pandas.DataFrame The fit results. \"\"\"", "wpath : str The path where to write file. name : str Simulation", "write file. name : str Simulation name. formats : np.array(str) List of files", "for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You", "Parameters ---------- file : str Fit results parquet file Returns ------- pandas.DataFrame The", "lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header'] elif", "super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header, data): \"\"\"Write simulated lcs. Parameters ----------", "dict General simulation meta data. Returns ------- None Just write a file. \"\"\"", "self).default(obj) def write_sim(wpath, name, formats, header, data): \"\"\"Write simulated lcs. Parameters ---------- wpath", "json modules to use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file.", "2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan) for k, v in", "Path of the file. Returns ------- str, dict, pandas.DataFrame The name, the header", "{int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()])", "of files fopprmats to write. header : dict The simulation header. data :", "Just write a file. 
\"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2',", "import pyarrow as pa import pyarrow.parquet as pq json_pyarrow = True except ImportError:", "json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json module to write parquet formats\", UserWarning)", "data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0,", "write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into a fits file. Parameters ---------- sim_lcs_meta", "format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file. Parameters ---------- file_path : str", "= [] for obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if", "open(file_path + file_ext, 'rb') as f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID',", "pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header']", "io stuff.\"\"\" import os import warnings import pickle import pandas as pd import", "numpy as np try: import json import pyarrow as pa import pyarrow.parquet as", "in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0,", "module to write parquet formats\", UserWarning) return name, header, lcs def write_fit(sim_lcs_meta, fit_res,", "data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 
1])", "Dataframe containing lcs. Returns ------- None Just write files. \"\"\" # Export lcs", "file_path : str Path of the file. Returns ------- str, dict, pandas.DataFrame The", "pyarrow and json module to write parquet formats\", UserWarning) return name, header, lcs", "else: warnings.warn(\"You need pyarrow and json module to write parquet formats\", UserWarning) return", "except ImportError: json_pyarrow = False from . import salt_utils as salt_ut class NpEncoder(json.JSONEncoder):", "str Fit results parquet file Returns ------- pandas.DataFrame The fit results. \"\"\" table", "'rb') as f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs", "data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k", "pa import pyarrow.parquet as pq json_pyarrow = True except ImportError: json_pyarrow = False", "list(sncosmo.utils.Result) List of sncosmo fit results for each lightcurve. directory : str Destination", "lcs. Returns ------- None Just write files. \"\"\" # Export lcs as pickle", "'.parquet') print(f\"Fit result output file : {directory}.parquet\") def open_fit(file): \"\"\"USe to open fit", "if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray):", "warnings.warn('You need pyarrow and json modules to use .parquet format', UserWarning) def read_sim_file(file_path):", "fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0,", "data of all lightcurves. 
fit_res : list(sncosmo.utils.Result) List of sncosmo fit results for", "import pickle import pandas as pd import numpy as np try: import json", "file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val for k,", "directory + '.parquet') print(f\"Fit result output file : {directory}.parquet\") def open_fit(file): \"\"\"USe to", "np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist()", "if file_ext == '.pkl': with open(file_path + file_ext, 'rb') as f: pkl_dic =", "formats : np.array(str) List of files fopprmats to write. header : dict The", "'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:]", "lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet') elif", "pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow:", "write_sim(wpath, name, formats, header, data): \"\"\"Write simulated lcs. Parameters ---------- wpath : str", "table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result output file : {directory}.parquet\") def", "'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0',", "lightcurves. fit_res : list(sncosmo.utils.Result) List of sncosmo fit results for each lightcurve. directory", "\"\"\"USe to open fit file. Parameters ---------- file : str Fit results parquet", "header and the lcs of the simulation. 
\"\"\" file_path, file_ext = os.path.splitext(file_path) if", "'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet')", "isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header,", "lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header'] elif file_ext == '.parquet':", "The path where to write file. name : str Simulation name. formats :", "lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet') elif 'parquet' in formats", "fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'):", "in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and", ": dict General simulation meta data. Returns ------- None Just write a file.", ": str Path of the file. Returns ------- str, dict, pandas.DataFrame The name,", "directory : str Destination of write file. sim_meta : dict General simulation meta", "as np try: import json import pyarrow as pa import pyarrow.parquet as pq", "the header and the lcs of the simulation. \"\"\" file_path, file_ext = os.path.splitext(file_path)", "v df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table =", "'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k] = []", "the file. Returns ------- str, dict, pandas.DataFrame The name, the header and the", "name, the header and the lcs of the simulation. 
\"\"\" file_path, file_ext =", "in fit_keys: data[k] = [] for obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out", "pickle import pandas as pd import numpy as np try: import json import", "from . import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj,", "Parameters ---------- file_path : str Path of the file. Returns ------- str, dict,", "for k in fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k] = v", "= json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result", "= False from . import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj):", "name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name +", "fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0]))", "= pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header'] elif file_ext == '.parquet': if", "in fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0'])", "('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1',", "return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header, data): \"\"\"Write simulated lcs. Parameters", "'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet' in formats and", "def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj)", "of sncosmo fit results for each lightcurve. directory : str Destination of write", "Destination of write file. sim_meta : dict General simulation meta data. 
Returns -------", "pandas.DataFrame The name, the header and the lcs of the simulation. \"\"\" file_path,", "lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta =", "table = pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs =", "= pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name =", "data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if", "name + '.pkl', 'wb') as file: pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta':", "= json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(),", "data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1])", ": {directory}.parquet\") def open_fit(file): \"\"\"USe to open fit file. Parameters ---------- file :", "------- None Just write files. \"\"\" # Export lcs as pickle if 'pkl'", "header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit", "isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return", "with open(file_path + file_ext, 'rb') as f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs'])", "header. data : pandas.DataFrame Dataframe containing lcs. 
Returns ------- None Just write files.", "= os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path + file_ext, 'rb') as f:", "ImportError: json_pyarrow = False from . import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def", "as pd import numpy as np try: import json import pyarrow as pa", "'.parquet': if json_pyarrow: table = pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'],", "header : dict The simulation header. data : pandas.DataFrame Dataframe containing lcs. Returns", "= table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val for k, val in", "'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k] =", "MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0'])", "and not json_pyarrow: warnings.warn('You need pyarrow and json modules to use .parquet format',", ": np.array(str) List of files fopprmats to write. header : dict The simulation", "table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result output file :", "if 'parquet' in formats and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder)", "stuff.\"\"\" import os import warnings import pickle import pandas as pd import numpy", "file_ext, 'rb') as f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True)", "results for each lightcurve. directory : str Destination of write file. sim_meta :", "np try: import json import pyarrow as pa import pyarrow.parquet as pq json_pyarrow", "results. \"\"\" table = pq.read_table(file) fit = table.to_pandas() fit.attrs = json.loads(table.schema.metadata['header'.encode()]) fit.set_index(['ID'], inplace=True)", "\"\"\"Write simulated lcs. 
Parameters ---------- wpath : str The path where to write", "pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name']", "'.pkl', 'wb') as file: pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header':", "import json import pyarrow as pa import pyarrow.parquet as pq json_pyarrow = True", "str The path where to write file. name : str Simulation name. formats", ": str The path where to write file. name : str Simulation name.", "'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k] = [] for obj_ID in fit_res:", "1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for", "in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c',", "os import warnings import pickle import pandas as pd import numpy as np", "data. Returns ------- None Just write a file. \"\"\" data = sim_lcs_meta.copy() fit_keys", "# Export lcs as pickle if 'pkl' in formats: with open(wpath + name", ": dict{list} Meta data of all lightcurves. fit_res : list(sncosmo.utils.Result) List of sncosmo", "pkl_dic['name'] header = pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow: table = pq.read_table(file_path", "data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1,", "---------- file : str Fit results parquet file Returns ------- pandas.DataFrame The fit", "None Just write a file. \"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0',", "open_fit(file): \"\"\"USe to open fit file. 
Parameters ---------- file : str Fit results", "in formats: with open(wpath + name + '.pkl', 'wb') as file: pkl_dic =", "json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode():", "pyarrow and json modules to use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a", "if json_pyarrow: table = pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True)", "lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into a fits file. Parameters", "file. Parameters ---------- file : str Fit results parquet file Returns ------- pandas.DataFrame", "1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0,", "simulation meta data. Returns ------- None Just write a file. \"\"\" data =", "output file : {directory}.parquet\") def open_fit(file): \"\"\"USe to open fit file. Parameters ----------", "\"\"\" table = pq.read_table(file) fit = table.to_pandas() fit.attrs = json.loads(table.schema.metadata['header'.encode()]) fit.set_index(['ID'], inplace=True) return", "obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN':", "fit_keys: data[k] = [] for obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out =", "+ '.parquet') elif 'parquet' in formats and not json_pyarrow: warnings.warn('You need pyarrow and", "file. Returns ------- str, dict, pandas.DataFrame The name, the header and the lcs", "def write_sim(wpath, name, formats, header, data): \"\"\"Write simulated lcs. 
Parameters ---------- wpath :", "return name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into a", "float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name,", "<reponame>bcarreres/snsim \"\"\"This module contains io stuff.\"\"\" import os import warnings import pickle import", "------- str, dict, pandas.DataFrame The name, the header and the lcs of the", "\"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name']", "files. \"\"\" # Export lcs as pickle if 'pkl' in formats: with open(wpath", "if MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1',", "fit results for each lightcurve. directory : str Destination of write file. sim_meta", "cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs,", "isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj)", "result output file : {directory}.parquet\") def open_fit(file): \"\"\"USe to open fit file. Parameters", "parquet formats\", UserWarning) return name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write", "file. Parameters ---------- file_path : str Path of the file. 
Returns ------- str,", "and json modules to use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim", "else: for k in fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k] =", "= {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath +", "as pa import pyarrow.parquet as pq json_pyarrow = True except ImportError: json_pyarrow =", "f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta']", "for obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out !=", "snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'): par_cov =", ". import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer):", ": dict The simulation header. data : pandas.DataFrame Dataframe containing lcs. Returns -------", "par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0,", "dict, pandas.DataFrame The name, the header and the lcs of the simulation. \"\"\"", "= sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5]", "= fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2',", "modules to use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file. Parameters", "and the lcs of the simulation. 
\"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext", "'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet' in formats and json_pyarrow: lcs", "= pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k):", "return obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header, data): \"\"\"Write", "pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table,", "'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet') elif 'parquet'", "= True except ImportError: json_pyarrow = False from . import salt_utils as salt_ut", "sim_meta : dict General simulation meta data. Returns ------- None Just write a", "sim_meta={}): \"\"\"Write fit into a fits file. Parameters ---------- sim_lcs_meta : dict{list} Meta", "contains io stuff.\"\"\" import os import warnings import pickle import pandas as pd", ": str Destination of write file. 
sim_meta : dict General simulation meta data.", "name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet' in formats", "data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq'])", "sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta,", "pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val", "if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'): par_cov", "as pq json_pyarrow = True except ImportError: json_pyarrow = False from . import", "'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for", "'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c',", "'header': header} pickle.dump(pkl_dic, file) if 'parquet' in formats and json_pyarrow: lcs = pa.Table.from_pandas(data)", "as f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs =", "inplace=True) lcs.attrs = {int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode()", "json import pyarrow as pa import pyarrow.parquet as pq json_pyarrow = True except", "'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb'])", "import pyarrow.parquet as pq json_pyarrow = True except ImportError: json_pyarrow = False from", "sim file. 
Parameters ---------- file_path : str Path of the file. Returns -------", "Returns ------- str, dict, pandas.DataFrame The name, the header and the lcs of", "header = pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow: table = pq.read_table(file_path +", "lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header", "elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder,", "snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1'])", "pandas.DataFrame Dataframe containing lcs. Returns ------- None Just write files. \"\"\" # Export", "pq.write_table(table, directory + '.parquet') print(f\"Fit result output file : {directory}.parquet\") def open_fit(file): \"\"\"USe", "fit file. Parameters ---------- file : str Fit results parquet file Returns -------", "fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data)", "of the file. Returns ------- str, dict, pandas.DataFrame The name, the header and", "pickle.dump(pkl_dic, file) if 'parquet' in formats and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta =", "module contains io stuff.\"\"\" import os import warnings import pickle import pandas as", "json_pyarrow = True except ImportError: json_pyarrow = False from . 
import salt_utils as", "name + '.parquet') elif 'parquet' in formats and not json_pyarrow: warnings.warn('You need pyarrow", "data[k] = [] for obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out']", "df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode():", "= pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory +", "file_ext == '.pkl': with open(file_path + file_ext, 'rb') as f: pkl_dic = pickle.load(f)", "warnings.warn(\"You need pyarrow and json module to write parquet formats\", UserWarning) return name,", "+ file_ext, 'rb') as f: pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'],", "formats, header, data): \"\"\"Write simulated lcs. Parameters ---------- wpath : str The path", "= pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow: table = pq.read_table(file_path + file_ext)", "dict The simulation header. data : pandas.DataFrame Dataframe containing lcs. Returns ------- None", "Export lcs as pickle if 'pkl' in formats: with open(wpath + name +", "---------- sim_lcs_meta : dict{list} Meta data of all lightcurves. fit_res : list(sncosmo.utils.Result) List", "meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath", "lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name", "---------- file_path : str Path of the file. 
Returns ------- str, dict, pandas.DataFrame", "np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def", "data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan) for k, v", "\"\"\"This module contains io stuff.\"\"\" import os import warnings import pickle import pandas", "fopprmats to write. header : dict The simulation header. data : pandas.DataFrame Dataframe", "[] for obj_ID in fit_res: fd = fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out", "json module to write parquet formats\", UserWarning) return name, header, lcs def write_fit(sim_lcs_meta,", "0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2])", "('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0]))", "if 'pkl' in formats: with open(wpath + name + '.pkl', 'wb') as file:", "pkl_dic = pickle.load(f) lcs = pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name", "== '.pkl': with open(file_path + file_ext, 'rb') as f: pkl_dic = pickle.load(f) lcs", "pyarrow as pa import pyarrow.parquet as pq json_pyarrow = True except ImportError: json_pyarrow", "lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode():", "'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k]", "for k in fit_keys: data[k] = [] for obj_ID in fit_res: fd =", "import numpy as np try: import 
json import pyarrow as pa import pyarrow.parquet", "header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result output file : {directory}.parquet\") def open_fit(file):", "'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header = pkl_dic['header'] elif file_ext", "2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in", "'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k] = [] for obj_ID in", "= {int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header =", "to write parquet formats\", UserWarning) return name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory,", "= lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet') elif 'parquet' in formats and", "List of files fopprmats to write. header : dict The simulation header. data", "open fit file. Parameters ---------- file : str Fit results parquet file Returns", "Just write files. \"\"\" # Export lcs as pickle if 'pkl' in formats:", "str Path of the file. Returns ------- str, dict, pandas.DataFrame The name, the", "str Simulation name. formats : np.array(str) List of files fopprmats to write. header", "== '.parquet': if json_pyarrow: table = pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID',", "formats\", UserWarning) return name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit", "header, data): \"\"\"Write simulated lcs. Parameters ---------- wpath : str The path where", "pickle if 'pkl' in formats: with open(wpath + name + '.pkl', 'wb') as", "def open_fit(file): \"\"\"USe to open fit file. Parameters ---------- file : str Fit", "fits file. 
Parameters ---------- sim_lcs_meta : dict{list} Meta data of all lightcurves. fit_res", "\"\"\" # Export lcs as pickle if 'pkl' in formats: with open(wpath +", "where to write file. name : str Simulation name. formats : np.array(str) List", "return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else:", "The simulation header. data : pandas.DataFrame Dataframe containing lcs. Returns ------- None Just", "to use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file. Parameters ----------", "header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json module to write parquet", "salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2,", "table = pq.read_table(file) fit = table.to_pandas() fit.attrs = json.loads(table.schema.metadata['header'.encode()]) fit.set_index(['ID'], inplace=True) return fit", "------- None Just write a file. \"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0',", "sncosmo fit results for each lightcurve. directory : str Destination of write file.", "'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in", "formats and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header,", "k in fit_keys: data[k] = [] for obj_ID in fit_res: fd = fit_res[obj_ID]['params']", "str Destination of write file. sim_meta : dict General simulation meta data. Returns", "Parameters ---------- sim_lcs_meta : dict{list} Meta data of all lightcurves. 
fit_res : list(sncosmo.utils.Result)", "cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result output file", "for each lightcurve. directory : str Destination of write file. sim_meta : dict", "use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read a sim file. Parameters ---------- file_path", "fit into a fits file. Parameters ---------- sim_lcs_meta : dict{list} Meta data of", "json_pyarrow: table = pq.read_table(file_path + file_ext) lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs", "= fit_res[obj_ID]['params'] snc_out = fit_res[obj_ID]['snc_out'] if snc_out != 'NaN': data['t0'].append(fd['t0']) data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if", "'.parquet') elif 'parquet' in formats and not json_pyarrow: warnings.warn('You need pyarrow and json", "['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c']", "Returns ------- pandas.DataFrame The fit results. \"\"\" table = pq.read_table(file) fit = table.to_pandas()", "{directory}.parquet\") def open_fit(file): \"\"\"USe to open fit file. Parameters ---------- file : str", "+ '.parquet') print(f\"Fit result output file : {directory}.parquet\") def open_fit(file): \"\"\"USe to open", "'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k] = [] for obj_ID", "Simulation name. formats : np.array(str) List of files fopprmats to write. header :", "pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet')", "directory, sim_meta={}): \"\"\"Write fit into a fits file. Parameters ---------- sim_lcs_meta : dict{list}", "data): \"\"\"Write simulated lcs. 
Parameters ---------- wpath : str The path where to", "['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys", "= table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json module to", "0])) if MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'],", "simulated lcs. Parameters ---------- wpath : str The path where to write file.", "write files. \"\"\" # Export lcs as pickle if 'pkl' in formats: with", "data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet' in formats and json_pyarrow:", "------- pandas.DataFrame The fit results. \"\"\" table = pq.read_table(file) fit = table.to_pandas() fit.attrs", "+= ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c',", "and json module to write parquet formats\", UserWarning) return name, header, lcs def", "with open(wpath + name + '.pkl', 'wb') as file: pkl_dic = {'name': name,", "obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header, data): \"\"\"Write simulated", "'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k", "par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2]))", "else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header, data): \"\"\"Write simulated lcs.", "lightcurve. directory : str Destination of write file. 
sim_meta : dict General simulation", "elif 'parquet' in formats and not json_pyarrow: warnings.warn('You need pyarrow and json modules", "lcs.replace_schema_metadata(meta) pq.write_table(lcs, wpath + name + '.parquet') elif 'parquet' in formats and not", "in fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k] = v df =", "0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0,", "file_path, file_ext = os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path + file_ext, 'rb')", "formats: with open(wpath + name + '.pkl', 'wb') as file: pkl_dic = {'name':", "data[k] = v df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder)", "json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json", "not json_pyarrow: warnings.warn('You need pyarrow and json modules to use .parquet format', UserWarning)", "json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result output", "= json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow and json module to write parquet formats\",", "data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan) for", "'.pkl': with open(file_path + file_ext, 'rb') as f: pkl_dic = pickle.load(f) lcs =", "'cov_x1_c'] for k in fit_keys: data[k] = [] for obj_ID in fit_res: fd", "in 
sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data) table = pa.Table.from_pandas(df) header =", "'parquet' in formats and not json_pyarrow: warnings.warn('You need pyarrow and json modules to", "= pkl_dic['name'] header = pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow: table =", "lcs = table.to_pandas() lcs.set_index(['ID', 'epochs'], inplace=True) lcs.attrs = {int(k): val for k, val", "'pkl' in formats: with open(wpath + name + '.pkl', 'wb') as file: pkl_dic", "data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2]) data['cov_x1_c'].append(par_cov[1, 2]) data['cov_mb_x1'].append(mb_cov[0, 1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else:", "header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into a fits file.", ": list(sncosmo.utils.Result) List of sncosmo fit results for each lightcurve. directory : str", "file : {directory}.parquet\") def open_fit(file): \"\"\"USe to open fit file. Parameters ---------- file", "open(wpath + name + '.pkl', 'wb') as file: pkl_dic = {'name': name, 'lcs':", "if MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov)", "read_sim_file(file_path): \"\"\"Read a sim file. 
Parameters ---------- file_path : str Path of the", "data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0, 1]) data['cov_x0_c'].append(par_cov[0, 2])", "NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return", "salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj,", "np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats, header, data):", "in formats and not json_pyarrow: warnings.warn('You need pyarrow and json modules to use", "a file. \"\"\" data = sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName", "parquet file Returns ------- pandas.DataFrame The fit results. \"\"\" table = pq.read_table(file) fit", "= json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs =", "import os import warnings import pickle import pandas as pd import numpy as", "name. formats : np.array(str) List of files fopprmats to write. header : dict", "salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj)", "fit_res : list(sncosmo.utils.Result) List of sncosmo fit results for each lightcurve. directory :", "meta data. Returns ------- None Just write a file. 
\"\"\" data = sim_lcs_meta.copy()", "need pyarrow and json module to write parquet formats\", UserWarning) return name, header,", "lcs.attrs = {int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header", "Fit results parquet file Returns ------- pandas.DataFrame The fit results. \"\"\" table =", "default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif", "pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header = json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(),", "print(f\"Fit result output file : {directory}.parquet\") def open_fit(file): \"\"\"USe to open fit file.", "pandas as pd import numpy as np try: import json import pyarrow as", "data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items():", "sim_lcs_meta.copy() fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5] in", "results parquet file Returns ------- pandas.DataFrame The fit results. \"\"\" table = pq.read_table(file)", "None Just write files. 
\"\"\" # Export lcs as pickle if 'pkl' in", "pq.write_table(lcs, wpath + name + '.parquet') elif 'parquet' in formats and not json_pyarrow:", "file_ext == '.parquet': if json_pyarrow: table = pq.read_table(file_path + file_ext) lcs = table.to_pandas()", "data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet' in formats and json_pyarrow: lcs =", "wpath + name + '.parquet') elif 'parquet' in formats and not json_pyarrow: warnings.warn('You", "fit_keys = ['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5] in ('salt2',", "= pd.DataFrame(data) table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()})", "as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif", "= pd.DataFrame.from_dict(pkl_dic['lcs']) lcs.index.set_names(['ID', 'epochs'], inplace=True) lcs.attrs = pkl_dic['meta'] name = pkl_dic['name'] header =", "\"\"\"Write fit into a fits file. Parameters ---------- sim_lcs_meta : dict{list} Meta data", "name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into a fits", "obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj,", "in formats and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header =", "'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1', 'cov_mb_c', 'cov_x1_c'] for k in fit_keys: data[k] = [] for", "'parquet' in formats and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs, cls=NpEncoder) header", "to write file. name : str Simulation name. 
formats : np.array(str) List of", "elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath, name, formats,", "os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path + file_ext, 'rb') as f: pkl_dic", "each lightcurve. directory : str Destination of write file. sim_meta : dict General", "all lightcurves. fit_res : list(sncosmo.utils.Result) List of sncosmo fit results for each lightcurve.", "to open fit file. Parameters ---------- file : str Fit results parquet file", "file: pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file)", "val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else:", "1:] mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1,", "'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys +=", "= table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory + '.parquet') print(f\"Fit result output file : {directory}.parquet\")", "pkl_dic['header'] elif file_ext == '.parquet': if json_pyarrow: table = pq.read_table(file_path + file_ext) lcs", "'epochs'], inplace=True) lcs.attrs = {int(k): val for k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name =", "UserWarning) return name, header, lcs def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into", "for k, v in sim_lcs_meta.items(): data[k] = v df = pd.DataFrame(data) table =", "header} pickle.dump(pkl_dic, file) if 'parquet' in formats and json_pyarrow: lcs = pa.Table.from_pandas(data) 
lcmeta", "json.dumps(header, cls=NpEncoder) meta = {'name'.encode(): name.encode(), 'attrs'.encode(): lcmeta.encode(), 'header'.encode(): header.encode()} lcs = lcs.replace_schema_metadata(meta)", "{'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic, file) if 'parquet' in", "return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def write_sim(wpath,", "k in fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k] = v df", "pd import numpy as np try: import json import pyarrow as pa import", "files fopprmats to write. header : dict The simulation header. data : pandas.DataFrame", ": str Simulation name. formats : np.array(str) List of files fopprmats to write.", "\"\"\" file_path, file_ext = os.path.splitext(file_path) if file_ext == '.pkl': with open(file_path + file_ext,", "The fit results. \"\"\" table = pq.read_table(file) fit = table.to_pandas() fit.attrs = json.loads(table.schema.metadata['header'.encode()])", "json_pyarrow: warnings.warn('You need pyarrow and json modules to use .parquet format', UserWarning) def", "need pyarrow and json modules to use .parquet format', UserWarning) def read_sim_file(file_path): \"\"\"Read", "file) if 'parquet' in formats and json_pyarrow: lcs = pa.Table.from_pandas(data) lcmeta = json.dumps(data.attrs,", "import salt_utils as salt_ut class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return", "k, val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need", "to write. header : dict The simulation header. data : pandas.DataFrame Dataframe containing", "---------- wpath : str The path where to write file. 
name : str", "import warnings import pickle import pandas as pd import numpy as np try:", "as pickle if 'pkl' in formats: with open(wpath + name + '.pkl', 'wb')", "of write file. sim_meta : dict General simulation meta data. Returns ------- None", "fit_keys += ['x0', 'e_x0', 'mb', 'e_mb', 'x1', 'e_x1', 'c', 'e_c', 'cov_x0_x1', 'cov_x0_c', 'cov_mb_x1',", "data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1])) data['c'].append(fd['c']) data['e_c'].append(np.sqrt(par_cov[2, 2])) data['cov_x0_x1'].append(par_cov[0,", "data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan) for k, v in sim_lcs_meta.items(): data[k]", "as file: pkl_dic = {'name': name, 'lcs': data.to_dict(), 'meta': data.attrs, 'header': header} pickle.dump(pkl_dic,", "General simulation meta data. Returns ------- None Just write a file. \"\"\" data", "= sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'): fit_keys += ['x0', 'e_x0', 'mb', 'e_mb',", "data : pandas.DataFrame Dataframe containing lcs. Returns ------- None Just write files. \"\"\"", "Meta data of all lightcurves. fit_res : list(sncosmo.utils.Result) List of sncosmo fit results", "simulation header. data : pandas.DataFrame Dataframe containing lcs. 
Returns ------- None Just write", "val in json.loads(table.schema.metadata['attrs'.encode()]).items()} name = table.schema.metadata['name'.encode()].decode() header = json.loads(table.schema.metadata['header'.encode()]) else: warnings.warn(\"You need pyarrow", "formats and not json_pyarrow: warnings.warn('You need pyarrow and json modules to use .parquet", "mb_cov = salt_ut.cov_x0_to_mb(fd['x0'], par_cov) data['x0'].append(fd['x0']) data['e_x0'].append(np.sqrt(par_cov[0, 0])) data['mb'].append(fd['mb']) data['e_mb'].append(np.sqrt(mb_cov[0, 0])) data['x1'].append(fd['x1']) data['e_x1'].append(np.sqrt(par_cov[1, 1]))", "table = pa.Table.from_pandas(df) header = json.dumps(sim_meta, cls=NpEncoder) table = table.replace_schema_metadata({'header'.encode(): header.encode()}) pq.write_table(table, directory", "def write_fit(sim_lcs_meta, fit_res, directory, sim_meta={}): \"\"\"Write fit into a fits file. Parameters ----------", "= ['t0', 'e_t0', 'chi2', 'ndof'] MName = sim_meta['model_name'] if MName[:5] in ('salt2', 'salt3'):", "lcs. Parameters ---------- wpath : str The path where to write file. name", "+ name + '.parquet') elif 'parquet' in formats and not json_pyarrow: warnings.warn('You need", "1]) data['cov_mb_c'].append(mb_cov[0, 2]) data['chi2'].append(snc_out['chisq']) data['ndof'].append(snc_out['ndof']) else: for k in fit_keys: data[k].append(np.nan) for k,", "data['e_t0'].append(np.sqrt(snc_out['covariance'][0, 0])) if MName[:5] in ('salt2', 'salt3'): par_cov = snc_out['covariance'][1:, 1:] mb_cov =" ]
[ "i, (idx, row) in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set", "verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores =", "entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores =", "entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version':", "(idx, row) in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set +", "= softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis,", "segment_scores in zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb,", "zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in prior", "in zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape)", "== 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4:", "noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores)", "numpy as np import pandas as pd from epic_kitchens.meta import training_labels, test_timestamps def", "pd from epic_kitchens.meta import training_labels, test_timestamps def softmax(x): ''' >>> res = softmax(np.array([0,", "in prior else 0.0) * float(score) for verb, noun, score in zip(verbs, nouns,", "encoding='utf8') as f: json.dump(results_dict, f) return results_dict 
def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True,", "= action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:]))", "action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {} for uid, segment_verb_scores_dict, segment_noun_scores_dict,", "prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8')", "x.ndim == 1: x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x", "np import pandas as pd from epic_kitchens.meta import training_labels, test_timestamps def softmax(x): '''", "segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version': '0.1', 'challenge': 'action_recognition', 'results': entries, }", "10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001) True", "for verb, noun, score in zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores): entries", "if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']: if test_set ==", "enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior) if", "['seen', 'unseen']: if test_set == 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts()", "verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {} for uid,", "0, 10]])) >>> np.sum(res, axis=1) array([ 1., 1., 1.]) >>> res = softmax(np.array([[0,", "then convert back to verb/noun indices segments = 
np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments,", "pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps =", "in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior)", "pathlib import Path import numpy as np import pandas as pd from epic_kitchens.meta", "if test_set == 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action =", "1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores,", "= scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict,", "prior_action else: prior = None results = pd.read_pickle(args.results_dir / ('test_' + test_set +", "True >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]]))", "top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] *", ":] instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:,", "'results': entries, } def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results,", "results = pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int)", "action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 
'action': segment_action_scores_dict } return {", "= np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def", "parser = argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(\"results_dir\", type=Path) parser.add_argument(\"submission_json\",", "segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun':", "[] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in zip(classes,", "1.0 >>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001) True >>> res =", "def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1,", "= np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert back to verb/noun", "def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return", "= scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids,", "in ['seen', 'unseen']: if test_set == 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']),", "zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)}) return entries def compute_score_dicts(results,", "scores_to_json(scores): entries = [] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls,", "= softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 
10]])) >>> np.sum(res, axis=1)", "= softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>> np.sum(res, axis=1) array([ 1., 1.])", "float(score) for cls, score in zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores", "test_timestamps(test_set) for i, (idx, row) in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json", "prior_action = action_counts.div(action_counts.sum()) prior = prior_action else: prior = None results = pd.read_pickle(args.results_dir", "(d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action else: prior = None", "actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict =", "training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action else: prior", "top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs =", "top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100):", "200]])) >>> np.sum(res, axis=1) array([ 1., 1.]) ''' if x.ndim == 1: x", "'version': '0.1', 'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict,", "exist_ok=True) for test_set in ['seen', 'unseen']: if test_set == 'unseen': action_counts = training_labels().apply(lambda", "Path import numpy as np import pandas as pd from epic_kitchens.meta import training_labels,", "dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = 
compute_score_dicts(results, prior) results_dict = to_json(uids,", "= pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps", "1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx]", "for verb, noun, score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)])", "'0.1', 'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict,", "'.json'), prior) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce submission", "noun, score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb,", "uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb':", "test_set in ['seen', 'unseen']: if test_set == 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'],", "= top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix", "compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior)", "if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape)", ":n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert back to verb/noun indices segments", "prior = prior_action else: prior = None results = 
pd.read_pickle(args.results_dir / ('test_' +", "training_labels, test_timestamps def softmax(x): ''' >>> res = softmax(np.array([0, 200, 10])) >>> np.sum(res)", "= softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count =", "::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert", "d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action else: prior = None results", "for cls, score in zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores =", "scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict", "(float(prior[(verb, noun)]) if (verb, noun) in prior else 0.0) * float(score) for verb,", "= [] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in", "200], [200, 0, 10]])) >>> np.sum(res, axis=1) array([ 1., 1., 1.]) >>> res", "max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return exp_x /", "return entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores", "axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action else: prior = None results =", "return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores", "noun): float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun):", "f) return results_dict def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) 
for test_set in", "argsort, then convert back to verb/noun indices segments = np.arange(0, instance_count).reshape(-1, 1) return", "None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) else:", "convert back to verb/noun indices segments = np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx],", "''' >>> res = softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res -", "= test_timestamps(test_set) for i, (idx, row) in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids,", "for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)})", "{ 'version': '0.1', 'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids, filepath, prior):", "verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True,", "noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict", "if (verb, noun) in prior else 0.0) * float(score) for verb, noun, score", "json from pathlib import Path import numpy as np import pandas as pd", "row) in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'),", "verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores =", "1., 1., 1.]) >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>>", "with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f) return results_dict def main(args): if", "softmax(x): ''' >>> res = softmax(np.array([0, 200, 
10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res", "-1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries = [] for verbs, nouns,", "else 0.0) * float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) return", "segment_scores)}) return entries def scores_to_json(scores): entries = [] for classes, segment_scores in zip(*top_scores(scores)):", "/ ('test_' + test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set)", "4: noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores)", "results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as", "def scores_to_json(scores): entries = [] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for", "axis=1) array([ 1., 1., 1.]) >>> res = softmax(np.array([[0, 200, 10], [0, 10,", "noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert back to", "prior else 0.0) * float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)})", "compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs =", "instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions,", "'w', encoding='utf8') as f: json.dump(results_dict, f) return results_dict def main(args): if not args.submission_json.exists():", "= results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores = 
noun_scores.mean(axis=(1, 2)) actions, action_scores =", "0])) < 0.0001) True >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200],", "np.sum(res, axis=1) array([ 1., 1.]) ''' if x.ndim == 1: x = x.reshape((1,", "/ np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0,", ":n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:,", "200, 10], [0, 10, 200], [200, 0, 10]])) >>> np.sum(res, axis=1) array([ 1.,", "not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']: if test_set == 'unseen':", "softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>> np.sum(res, axis=1) array([ 1., 1.]) '''", "np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert back to verb/noun indices", "1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores)", "1: x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x", "cls, score in zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb']", "noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return", "[200, 0, 10]])) >>> np.sum(res, axis=1) array([ 1., 1., 1.]) >>> res =", "results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if", "zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) 
==", "args.submission_json / (test_set + '.json'), prior) if __name__ == '__main__': import argparse parser", "exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores =", "'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version': '0.1', 'challenge': 'action_recognition',", "entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in prior else 0.0) * float(score)", "np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores,", "= to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as f:", "noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {} for uid, segment_verb_scores_dict,", "back to verb/noun indices segments = np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments,", "segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)}) return entries", "scores, prior): entries = [] for verbs, nouns, segment_scores in zip(*actions, scores): if", "= action_counts.div(action_counts.sum()) prior = prior_action else: prior = None results = pd.read_pickle(args.results_dir /", "/ (test_set + '.json'), prior) if __name__ == '__main__': import argparse parser =", "def action_scores_to_json(actions, scores, prior): entries = [] for verbs, nouns, segment_scores in zip(*actions,", "open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f) return results_dict def main(args): if not", "import json from pathlib import Path import numpy as np import pandas as", 
"action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO:", "np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx =", "top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores", "noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return", "action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action", "< 0.0001) True >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200,", "= results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun']", "noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def", "prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores", "test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx,", "top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:,", "''' if x.ndim == 1: x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1,", ">>> np.sum(res, axis=1) array([ 1., 1.]) ''' if x.ndim == 1: x =", "2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = 
scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict", "score in zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores): entries = [] for", "= top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis]", "n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs", "uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx, row) in enumerate(timestamps.iterrows()):", "parents=True) with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f) return results_dict def main(args):", "200, 10], [0, 10, 200]])) >>> np.sum(res, axis=1) array([ 1., 1.]) ''' if", ">>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>> np.sum(res, axis=1) array([", "test_set == 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum())", "prior): entries = [] for verbs, nouns, segment_scores in zip(*actions, scores): if prior", "import numpy as np import pandas as pd from epic_kitchens.meta import training_labels, test_timestamps", "-1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return exp_x", "score in zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if", "np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001) True >>> res = softmax(np.array([[0, 200,", "= argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(\"results_dir\", type=Path) parser.add_argument(\"submission_json\", type=Path)", "nouns, segment_scores)}) 
else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in prior else", "top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx,", "entries = {} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict):", "uids, args.submission_json / (test_set + '.json'), prior) if __name__ == '__main__': import argparse", "= action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict):", "uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict,", "score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun)", "= softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0, 1, 0]))", "1)) exp_x = np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def", "return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores", "from epic_kitchens.meta import training_labels, test_timestamps def softmax(x): ''' >>> res = softmax(np.array([0, 200,", "filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict,", "as pd from epic_kitchens.meta import training_labels, test_timestamps def softmax(x): ''' >>> res =", ">>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001) True >>>", "timestamps = test_timestamps(test_set) for i, (idx, row) in 
enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results,", "top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:,", "= np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]])", "dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx, row) in enumerate(timestamps.iterrows()): uids[i] = str(idx)", "from pathlib import Path import numpy as np import pandas as pd from", "= scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs,", "segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in prior else 0.0)", "compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2))", "= compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores,", "= [] for verbs, nouns, segment_scores in zip(*actions, scores): if prior is None:", "def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict =", "segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict,", "entries = [] for verbs, nouns, segment_scores in zip(*actions, scores): if prior is", "zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores): entries = [] for 
classes, segment_scores", "verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict }", "verbs, nouns, segment_scores in zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb, noun): float(score)", "def softmax(x): ''' >>> res = softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0 >>>", "axis=1) array([ 1., 1.]) ''' if x.ndim == 1: x = x.reshape((1, -1))", "verb, noun, score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if", "'action': segment_action_scores_dict } return { 'version': '0.1', 'challenge': 'action_recognition', 'results': entries, } def", "entries, } def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior)", "argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(\"results_dir\", type=Path) parser.add_argument(\"submission_json\", type=Path) main(parser.parse_args())", "action_scores_dict = compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with", "args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']: if test_set == 'unseen': action_counts", "::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores,", "noun) in prior else 0.0) * float(score) for verb, noun, score in zip(verbs,", "in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in", "* 
float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) return entries def", "0.0) * float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) return entries", "top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count", "= np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return exp_x / np.sum(exp_x,", "top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries = []", "return results_dict def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen',", "entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb,", "args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']: if test_set == 'unseen': action_counts =", "noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f) return", "== 1: x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x =", "nouns, segment_scores in zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb, noun): float(score) for", "x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x -", "verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1, 2))", "noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries = [] for", "for verbs, nouns, segment_scores in zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb, 
noun):", "{} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] =", "top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores =", "import Path import numpy as np import pandas as pd from epic_kitchens.meta import", "dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior) if __name__ == '__main__': import", "# TODO: Reshape, argsort, then convert back to verb/noun indices segments = np.arange(0,", "zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict", "test_timestamps def softmax(x): ''' >>> res = softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0", "top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n,", "np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1,", "return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior):", "classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)}) return", "np.sum(res, axis=1) array([ 1., 1., 1.]) >>> res = softmax(np.array([[0, 200, 10], [0,", "10, 200], [200, 0, 10]])) >>> np.sum(res, axis=1) array([ 1., 1., 1.]) >>>", "action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries", "200, 
10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001)", ":n]]) def action_scores_to_json(actions, scores, prior): entries = [] for verbs, nouns, segment_scores in", "2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1, 2)) actions,", "= np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx", "action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries =", "} return { 'version': '0.1', 'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids,", "softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :]", "= x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x)", "scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict,", "'__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)", "nouns, segment_scores)}) return entries def scores_to_json(scores): entries = [] for classes, segment_scores in", "argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(\"results_dir\", type=Path)", "-1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then", "action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count = 
action_probs_matrix.shape[0] action_ranks", "action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries = [] for verbs,", "10, 200]])) >>> np.sum(res, axis=1) array([ 1., 1.]) ''' if x.ndim == 1:", "verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries =", "= action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape,", "+ '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx, row)", "to verb/noun indices segments = np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]),", "instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n],", "def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs", "top_noun_probs[:, np.newaxis, :] instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx", "verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids,", "1., 1.]) ''' if x.ndim == 1: x = x.reshape((1, -1)) max_x =", "f: json.dump(results_dict, f) return results_dict def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for", "main(args): if 
not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']: if test_set", "= noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict =", "in zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun,", "((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries", "1., 1.]) >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>> np.sum(res,", "compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w',", "action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort,", "Reshape, argsort, then convert back to verb/noun indices segments = np.arange(0, instance_count).reshape(-1, 1)", "[0, 10, 200]])) >>> np.sum(res, axis=1) array([ 1., 1.]) ''' if x.ndim ==", "verb/noun indices segments = np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count,", ">>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]])) >>>", "4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores", "prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict)", "np.newaxis] * top_noun_probs[:, np.newaxis, :] 
instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1]", "return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {} for", "results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores,", "0.0001) True >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0,", "'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict", "in zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores): entries = [] for classes,", "- np.array([0, 1, 0])) < 0.0001) True >>> res = softmax(np.array([[0, 200, 10],", "'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version': '0.1', 'challenge': 'action_recognition', 'results': entries,", "return entries def scores_to_json(scores): entries = [] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls):", "exp_x = np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores):", "action_counts.div(action_counts.sum()) prior = prior_action else: prior = None results = pd.read_pickle(args.results_dir / ('test_'", "== '__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\",", "prior) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON", "float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores):", "segments = np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), 
action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:,", "noun, score in zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores): entries = []", ">>> res = softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0,", "prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {}", "= str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior) if __name__ ==", "if len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores)", "str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior) if __name__ == '__main__':", "as f: json.dump(results_dict, f) return results_dict def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True)", "(test_set + '.json'), prior) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(", "= compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath,", "noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores", "TODO: Reshape, argsort, then convert back to verb/noun indices segments = np.arange(0, instance_count).reshape(-1,", "for test_set in ['seen', 'unseen']: if test_set == 'unseen': action_counts = training_labels().apply(lambda d:", "x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return", "entries = [] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score", "action_scores_to_json(actions, scores, prior): entries = [] for verbs, nouns, 
segment_scores in zip(*actions, scores):", "softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]])) >>> np.sum(res, axis=1) array([", "action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) #", "+ '.json'), prior) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce", "'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior =", "for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = {", "results_dict def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']:", "noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores)", "__name__ == '__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON from results", "'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict =", "import pandas as pd from epic_kitchens.meta import training_labels, test_timestamps def softmax(x): ''' >>>", "top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores)", "np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx, row) in enumerate(timestamps.iterrows()): uids[i] =", "if prior is None: entries.append({\"{},{}\".format(verb, 
noun): float(score) for verb, noun, score in zip(verbs,", "scores[np.arange(0, len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores", "= verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1,", "= prior_action else: prior = None results = pd.read_pickle(args.results_dir / ('test_' + test_set", "import training_labels, test_timestamps def softmax(x): ''' >>> res = softmax(np.array([0, 200, 10])) >>>", "np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments, action_ranks[:, :n]]) def", "len(noun_scores.shape) == 4: noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict", "'unseen']: if test_set == 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action", "{ 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version': '0.1', 'challenge':", "np.array([0, 1, 0])) < 0.0001) True >>> res = softmax(np.array([[0, 200, 10], [0,", "res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]])) >>> np.sum(res,", "verb, noun, score in zip(verbs, nouns, segment_scores)}) return entries def scores_to_json(scores): entries =", "verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f)", "= training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action else:", "np.sum(res) 1.0 >>> np.all(np.abs(res - 
np.array([0, 1, 0])) < 0.0001) True >>> res", "= None results = pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl')) uids =", "entries def scores_to_json(scores): entries = [] for classes, segment_scores in zip(*top_scores(scores)): entries.append({str(cls): float(score)", "res = softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>> np.sum(res, axis=1) array([ 1.,", "to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries = {} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in", "* top_noun_probs[:, np.newaxis, :] instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx,", "verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict,", "in zip(*top_scores(scores)): entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)}) return entries def", "segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version': '0.1', 'challenge': 'action_recognition', 'results':", "1.]) ''' if x.ndim == 1: x = x.reshape((1, -1)) max_x = np.max(x,", "== 4: noun_scores = noun_scores.mean(axis=(1, 2)) actions, action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict =", "pandas as pd from epic_kitchens.meta import training_labels, test_timestamps def softmax(x): ''' >>> res", "action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f) return results_dict", "np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1,", "action_scores_dict): entries = {} for uid, segment_verb_scores_dict, 
segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict,", "indices segments = np.arange(0, instance_count).reshape(-1, 1) return ((top_verbs[segments, verb_ranks_idx], top_nouns[segments, noun_ranks_idx]), action_probs_matrix.reshape(instance_count, -1)[segments,", "} def dump_scores_to_json(results, uids, filepath, prior): verb_scores_dict, noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict", "'.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx, row) in", "axis=1).reshape((-1, 1)) exp_x = np.exp(x - max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))", "epic_kitchens.meta import training_labels, test_timestamps def softmax(x): ''' >>> res = softmax(np.array([0, 200, 10]))", "top_verb_scores = top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores)", "action_scores_dict = action_scores_to_json(actions, action_scores, prior) return verb_scores_dict, noun_scores_dict, action_scores_dict def to_json(uids, verb_scores_dict, noun_scores_dict,", "softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0, 1, 0])) <", ">>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001) True >>> res = softmax(np.array([[0,", "np.newaxis, :] instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count, -1).argsort(axis=-1)[:, ::-1] verb_ranks_idx, noun_ranks_idx =", "def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set in ['seen', 'unseen']: if", "prior = None results = pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl')) uids", "res = softmax(np.array([0, 200, 10])) >>> np.sum(res) 1.0 >>> np.all(np.abs(res - np.array([0, 1,", "('test_' + test_set + '.pkl')) uids = 
np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for", "10], [0, 10, 200]])) >>> np.sum(res, axis=1) array([ 1., 1.]) ''' if x.ndim", "softmax(top_noun_scores) action_probs_matrix = top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count = action_probs_matrix.shape[0]", "entries.append({str(cls): float(score) for cls, score in zip(classes, segment_scores)}) return entries def compute_score_dicts(results, prior):", "uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json / (test_set + '.json'), prior) if __name__", "max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1]", "for i, (idx, row) in enumerate(timestamps.iterrows()): uids[i] = str(idx) dump_scores_to_json(results, uids, args.submission_json /", "action_ranks[:, :n]]) def action_scores_to_json(actions, scores, prior): entries = [] for verbs, nouns, segment_scores", "+ test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i,", "(verb, noun) in prior else 0.0) * float(score) for verb, noun, score in", "noun): (float(prior[(verb, noun)]) if (verb, noun) in prior else 0.0) * float(score) for", "noun)]) if (verb, noun) in prior else 0.0) * float(score) for verb, noun,", "= {} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)]", "import argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON from results pickle\", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument(\"results_dir\",", "if x.ndim == 1: x = x.reshape((1, -1)) max_x = np.max(x, axis=1).reshape((-1, 1))", "array([ 1., 1.]) ''' if x.ndim == 1: x = x.reshape((1, -1)) max_x", "top_n_scores_idx] return top_n_scores_idx, top_n_scores def 
compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores = top_scores(verb_scores) top_nouns,", "filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict, f) return results_dict def", "segment_action_scores_dict in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict,", "float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)}) else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb,", "1.]) >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]])) >>> np.sum(res, axis=1)", "prior is None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun, score in zip(verbs, nouns,", "<filename>submission_json.py import json from pathlib import Path import numpy as np import pandas", "top_scores(verb_scores) top_nouns, top_noun_scores = top_scores(noun_scores) top_verb_probs = softmax(top_verb_scores) top_noun_probs = softmax(top_noun_scores) action_probs_matrix =", "1, 0])) < 0.0001) True >>> res = softmax(np.array([[0, 200, 10], [0, 10,", "len(verb_scores.shape) == 4: verb_scores = verb_scores.mean(axis=(1, 2)) noun_scores = results['scores']['noun'] if len(noun_scores.shape) ==", "segment_scores)}) return entries def compute_score_dicts(results, prior): verb_scores = results['scores']['verb'] if len(verb_scores.shape) == 4:", "to_json(uids, verb_scores_dict, noun_scores_dict, action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True) with open(filepath, 'w', encoding='utf8') as f: json.dump(results_dict,", "scores): if prior is None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun, score in", "= top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count = action_probs_matrix.shape[0] action_ranks =", "def to_json(uids, verb_scores_dict, noun_scores_dict, 
action_scores_dict): entries = {} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict", "as np import pandas as pd from epic_kitchens.meta import training_labels, test_timestamps def softmax(x):", "10]])) >>> np.sum(res, axis=1) array([ 1., 1., 1.]) >>> res = softmax(np.array([[0, 200,", "else: prior = None results = pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl'))", "d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior = prior_action else: prior =", "top_verb_probs[:, :n, np.newaxis] * top_noun_probs[:, np.newaxis, :] instance_count = action_probs_matrix.shape[0] action_ranks = action_probs_matrix.reshape(instance_count,", "verb_ranks_idx, noun_ranks_idx = np.unravel_index(action_ranks[:, :n], dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert back", "is None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun, score in zip(verbs, nouns, segment_scores)})", "[0, 10, 200], [200, 0, 10]])) >>> np.sum(res, axis=1) array([ 1., 1., 1.])", "return { 'version': '0.1', 'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results, uids, filepath,", "dims=(action_probs_matrix.shape[1:])) # TODO: Reshape, argsort, then convert back to verb/noun indices segments =", "segment_action_scores_dict } return { 'version': '0.1', 'challenge': 'action_recognition', 'results': entries, } def dump_scores_to_json(results,", "in zip(uids, verb_scores_dict, noun_scores_dict, action_scores_dict): entries[str(uid)] = { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action':", "[] for verbs, nouns, segment_scores in zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb,", "array([ 1., 1., 1.]) >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]]))", "len(scores)).reshape(-1, 1), top_n_scores_idx] return top_n_scores_idx, top_n_scores def 
compute_action_scores(verb_scores, noun_scores, n=100): top_verbs, top_verb_scores =", "= { 'verb': segment_verb_scores_dict, 'noun': segment_noun_scores_dict, 'action': segment_action_scores_dict } return { 'version': '0.1',", "noun_scores_dict, action_scores_dict): entries = {} for uid, segment_verb_scores_dict, segment_noun_scores_dict, segment_action_scores_dict in zip(uids, verb_scores_dict,", "10], [0, 10, 200], [200, 0, 10]])) >>> np.sum(res, axis=1) array([ 1., 1.,", "- max_x) return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:,", "zip(*actions, scores): if prior is None: entries.append({\"{},{}\".format(verb, noun): float(score) for verb, noun, score", "action_scores = compute_action_scores(verb_scores, noun_scores) verb_scores_dict = scores_to_json(verb_scores) noun_scores_dict = scores_to_json(noun_scores) action_scores_dict = action_scores_to_json(actions,", "== 'unseen': action_counts = training_labels().apply(lambda d: (d['verb_class'], d['noun_class']), axis=1).value_counts() prior_action = action_counts.div(action_counts.sum()) prior", "axis=1).reshape((-1, 1)) def top_scores(scores): top_n_scores_idx = np.argsort(scores)[:, ::-1] top_n_scores = scores[np.arange(0, len(scores)).reshape(-1, 1),", "= np.zeros(results['scores']['verb'].shape[0], dtype=np.int) timestamps = test_timestamps(test_set) for i, (idx, row) in enumerate(timestamps.iterrows()): uids[i]", ">>> np.sum(res, axis=1) array([ 1., 1., 1.]) >>> res = softmax(np.array([[0, 200, 10],", "else: entries.append({\"{},{}\".format(verb, noun): (float(prior[(verb, noun)]) if (verb, noun) in prior else 0.0) *", "json.dump(results_dict, f) return results_dict def main(args): if not args.submission_json.exists(): args.submission_json.mkdir(parents=True, exist_ok=True) for test_set", "noun_scores_dict, action_scores_dict = compute_score_dicts(results, prior) results_dict = to_json(uids, verb_scores_dict, noun_scores_dict, 
action_scores_dict) filepath.parent.mkdir(exist_ok=True, parents=True)", "None results = pd.read_pickle(args.results_dir / ('test_' + test_set + '.pkl')) uids = np.zeros(results['scores']['verb'].shape[0],", "if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description=\"Produce submission JSON from" ]
[ "placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info']", "in response['Items']: Placement_ID = item['ID'] #we got placement id placement = {} #####################", "= [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\": 500,", "yield on the line with super(...), # which wouldn't work (see my comment", "in db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates'] = certificates ############ WE GOT", "line with super(...), # which wouldn't work (see my comment below), so... return", "500, \"body\": json.dumps('Internal Server Error - No matching skills for placement') } else:", "(see my comment below), so... return (str(o) for o in [o]) return super(DecimalEncoder,", "#extract id for item in response['Items']: Placement_ID = item['ID'] #we got placement id", "item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes from Skills_jobs table", "placement') } else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits']", "but that would mean a yield on the line with super(...), # which", "id placement = {} ##################### Add attributes from Jobs TAble ########################## placement['Company_ID'] =", "default(self, o): if isinstance(o, decimal.Decimal): # wanted a simple yield str(o) in the", "from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): #", "table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching certificates for 
placement\") else: for attribute", "######################### table = dynamodb.Table('Skills_jobs') skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] ==", "db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills'] = skills #################### Add attributes from", "for o in [o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2')", "db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits'] = benefits #################### Add attributes from", "#add skills to placement placement['certificates'] = certificates ############ WE GOT ALL ATTRIBUTES FOR", "[] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching certificates for placement\")", "Company_ID = int(queryStringParameters['id']) ### GET CONTACT ### table = dynamodb.Table('Jobs') #scan for given", "import boto3 import time import decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import", "= item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3 Add", "isinstance(o, decimal.Decimal): # wanted a simple yield str(o) in the next line, #", "placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes from Skills_jobs table #########################", "ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): # wanted a simple", "placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates'] =", "given student id response = 
table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements = []", "int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description']", "print(\"No item was found :/\") return { \"statusCode\": 500, 'body': json.dumps('Internal server error-", "placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree']", "class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): # wanted a simple yield", "dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return {", "#scan for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements", "'body': json.dumps('Internal server error- No item found in contact_student table *-*') } else:", "for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements =", "== 0: return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching", "boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if", "benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\":", "item['title'] placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name']", "else: for attribute in 
db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates'] = certificates", "table = dynamodb.Table('Skills_jobs') skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0:", "'us-east-2') def lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id'])", "= benefits #################### Add attributes from Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates", "with super(...), # which wouldn't work (see my comment below), so... return (str(o)", "FOR A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements return { \"statusCode\": 200,", "lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id']) ### GET", "Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title']", "placement id placement = {} ##################### Add attributes from Jobs TAble ########################## placement['Company_ID']", "responseBody = { } placements = [] if response['Count'] == 0: #no matching", "placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3", "\"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching skills for placement') }", "attributes from Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates = [] db_response =", "placement['certificates'] = certificates ############ WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT 
#################", "from Benefits table ################# table = dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID))", "GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements return", "0: return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching skills", "if db_response['Count'] == 0: return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error -", "### GET CONTACT ### table = dynamodb.Table('Jobs') #scan for given student id response", "found :/\") return { \"statusCode\": 500, 'body': json.dumps('Internal server error- No item found", "= item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location'] =", "line, # but that would mean a yield on the line with super(...),", "event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id']) ### GET CONTACT ### table =", "int(item['salaries']) ################3 Add attributes from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills =", "the next line, # but that would mean a yield on the line", "from Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name']", "comment below), so... 
return (str(o) for o in [o]) return super(DecimalEncoder, self).default(o) dynamodb", "def default(self, o): if isinstance(o, decimal.Decimal): # wanted a simple yield str(o) in", "# wanted a simple yield str(o) in the next line, # but that", "placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location']", "[o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context):", "import decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def", ":/\") return { \"statusCode\": 500, 'body': json.dumps('Internal server error- No item found in", "was found :/\") return { \"statusCode\": 500, 'body': json.dumps('Internal server error- No item", "placement placement['benefits'] = benefits #################### Add attributes from Certificates_job table ################# table =", "certificates = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching certificates", "\"statusCode\": 500, 'body': json.dumps('Internal server error- No item found in contact_student table *-*')", "print(\"No matching certificates for placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name']) #add skills", "table ################# table = dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count']", "DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): # wanted a simple yield str(o)", "= dynamodb.Table('Skills_jobs') skills = [] db_response = 
table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return", "= item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes from Skills_jobs table ######################### table", "skills to placement placement['certificates'] = certificates ############ WE GOT ALL ATTRIBUTES FOR A", "company id Company_ID = int(queryStringParameters['id']) ### GET CONTACT ### table = dynamodb.Table('Jobs') #scan", "Key,Attr from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal):", "import time import decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import ClientError class", "str(o) in the next line, # but that would mean a yield on", "got placement id placement = {} ##################### Add attributes from Jobs TAble ##########################", "} else: for attribute in db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills'] =", "db_response['Count'] == 0: print(\"No matching certificates for placement\") else: for attribute in db_response['Items']:", "Server Error - No matching benefits for placement') } else: for attribute in", "A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements return { \"statusCode\": 200, \"body\":", "############ WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] =", "TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] =", "= boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get company", "{ \"statusCode\": 500, \"body\": 
json.dumps('Internal Server Error - No matching skills for placement')", "on the line with super(...), # which wouldn't work (see my comment below),", "= [] if response['Count'] == 0: #no matching item print(\"No item was found", "skills #################### Add attributes from Benefits table ################# table = dynamodb.Table('Benefits') benefits =", "return { \"statusCode\": 500, 'body': json.dumps('Internal server error- No item found in contact_student", "next line, # but that would mean a yield on the line with", "region_name = 'us-east-2') def lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get company id Company_ID", "if db_response['Count'] == 0: print(\"No matching certificates for placement\") else: for attribute in", "from Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID))", "dynamodb.Table('Jobs') #scan for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { }", "= table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements = [] if response['Count'] == 0:", "= int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description'] placement['category'] =", "= dynamodb.Table('Jobs') #scan for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = {", "attributes from Benefits table ################# table = dynamodb.Table('Benefits') benefits = [] db_response =", "= int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description'] =", "ALL ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement) 
responseBody['placements'] = placements return {", "from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID))", "*-*') } else: #extract id for item in response['Items']: Placement_ID = item['ID'] #we", "return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching skills for", "placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description'] placement['category']", "= {} ##################### Add attributes from Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID']", "in [o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event,", "contact_student table *-*') } else: #extract id for item in response['Items']: Placement_ID =", "db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching certificates for placement\") else:", "context): queryStringParameters = event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id']) ### GET CONTACT", "WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements", "skills.append(attribute['name']) #add skills to placement placement['skills'] = skills #################### Add attributes from Benefits", "int(queryStringParameters['id']) ### GET CONTACT ### table = dynamodb.Table('Jobs') #scan for given student id", "500, 'body': json.dumps('Internal server error- No item found in contact_student table *-*') }", "boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context): 
queryStringParameters = event['queryStringParameters'] #get company id", "Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if", "certificates ############ WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements']", "#get company id Company_ID = int(queryStringParameters['id']) ### GET CONTACT ### table = dynamodb.Table('Jobs')", "attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits'] = benefits #################### Add", "attribute in db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills'] = skills #################### Add", "server error- No item found in contact_student table *-*') } else: #extract id", "#add skills to placement placement['benefits'] = benefits #################### Add attributes from Certificates_job table", "that would mean a yield on the line with super(...), # which wouldn't", "def lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id']) ###", "mean a yield on the line with super(...), # which wouldn't work (see", "Add attributes from Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name']", "Placement_ID = item['ID'] #we got placement id placement = {} ##################### Add attributes", "GET CONTACT ### table = dynamodb.Table('Jobs') #scan for given student id response =", "{ } placements = [] if response['Count'] == 0: #no matching item print(\"No", "placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes from", "#we got 
placement id placement = {} ##################### Add attributes from Jobs TAble", "################# table = dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] ==", "= item['ID'] #we got placement id placement = {} ##################### Add attributes from", "in db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills'] = skills #################### Add attributes", "return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching benefits for", "# which wouldn't work (see my comment below), so... return (str(o) for o", "Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if", "for item in response['Items']: Placement_ID = item['ID'] #we got placement id placement =", "placement['title'] = item['title'] placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name']", "to placement placement['certificates'] = certificates ############ WE GOT ALL ATTRIBUTES FOR A SINGLE", "would mean a yield on the line with super(...), # which wouldn't work", "== 0: #no matching item print(\"No item was found :/\") return { \"statusCode\":", "\"body\": json.dumps('Internal Server Error - No matching benefits for placement') } else: for", "= event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id']) ### GET CONTACT ### table", "placement = {} ##################### Add attributes from Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID'])", "dynamodb.Table('Certificates_job') certificates = [] db_response = 
table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching", "in contact_student table *-*') } else: #extract id for item in response['Items']: Placement_ID", "#add skills to placement placement['skills'] = skills #################### Add attributes from Benefits table", "queryStringParameters = event['queryStringParameters'] #get company id Company_ID = int(queryStringParameters['id']) ### GET CONTACT ###", "= int(queryStringParameters['id']) ### GET CONTACT ### table = dynamodb.Table('Jobs') #scan for given student", "500, \"body\": json.dumps('Internal Server Error - No matching benefits for placement') } else:", "below), so... return (str(o) for o in [o]) return super(DecimalEncoder, self).default(o) dynamodb =", "error- No item found in contact_student table *-*') } else: #extract id for", "botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): # wanted", "skills for placement') } else: for attribute in db_response['Items']: skills.append(attribute['name']) #add skills to", "for attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits'] = benefits ####################", "item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes from Skills_jobs table ######################### table =", "o): if isinstance(o, decimal.Decimal): # wanted a simple yield str(o) in the next", "for placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates']", "item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries'])", "placement['benefits'] = benefits #################### Add attributes from 
Certificates_job table ################# table = dynamodb.Table('Certificates_job')", "\"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching benefits for placement') }", "from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o):", "matching certificates for placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name']) #add skills to", "wouldn't work (see my comment below), so... return (str(o) for o in [o])", "for placement') } else: for attribute in db_response['Items']: skills.append(attribute['name']) #add skills to placement", "{ \"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching benefits for placement')", "json.dumps('Internal Server Error - No matching benefits for placement') } else: for attribute", "yield str(o) in the next line, # but that would mean a yield", "certificates for placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name']) #add skills to placement", "json import boto3 import time import decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions", "item print(\"No item was found :/\") return { \"statusCode\": 500, 'body': json.dumps('Internal server", "placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description']", "super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context): queryStringParameters =", "id Company_ID = int(queryStringParameters['id']) ### GET CONTACT ### table = dynamodb.Table('Jobs') #scan for", "to placement placement['skills'] = skills #################### Add attributes from Benefits table ################# table", "found in contact_student table *-*') } else: #extract id 
for item in response['Items']:", "my comment below), so... return (str(o) for o in [o]) return super(DecimalEncoder, self).default(o)", "item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree']", "if isinstance(o, decimal.Decimal): # wanted a simple yield str(o) in the next line,", "placement['salaries'] = int(item['salaries']) ################3 Add attributes from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs')", "item in response['Items']: Placement_ID = item['ID'] #we got placement id placement = {}", "table *-*') } else: #extract id for item in response['Items']: Placement_ID = item['ID']", "0: #no matching item print(\"No item was found :/\") return { \"statusCode\": 500,", "table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error", "benefits #################### Add attributes from Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates =", "table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements = [] if response['Count'] == 0: #no", "return (str(o) for o in [o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name", "response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements = [] if response['Count'] ==", "= [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching certificates for", "if response['Count'] == 0: #no matching item print(\"No item was found :/\") return", "{} ##################### Add attributes from Jobs TAble ########################## 
placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] =", "placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries']", "0: return { \"statusCode\": 500, \"body\": json.dumps('Internal Server Error - No matching benefits", "self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context): queryStringParameters = event['queryStringParameters']", "# but that would mean a yield on the line with super(...), #", "item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location']", "response['Items']: Placement_ID = item['ID'] #we got placement id placement = {} ##################### Add", "ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements return { \"statusCode\":", "attributes from Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] =", "################3 Add attributes from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills = []", "a yield on the line with super(...), # which wouldn't work (see my", "- No matching benefits for placement') } else: for attribute in db_response['Items']: benefits.append(attribute['name'])", "for attribute in db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills'] = skills ####################", "int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description'] placement['category'] = item['category']", "db_response['Count'] == 0: return { \"statusCode\": 500, 
\"body\": json.dumps('Internal Server Error - No", "attributes from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills = [] db_response =", "id for item in response['Items']: Placement_ID = item['ID'] #we got placement id placement", "placement placement['skills'] = skills #################### Add attributes from Benefits table ################# table =", "- No matching skills for placement') } else: for attribute in db_response['Items']: skills.append(attribute['name'])", "else: for attribute in db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills'] = skills", "Add attributes from Benefits table ################# table = dynamodb.Table('Benefits') benefits = [] db_response", "################# table = dynamodb.Table('Certificates_job') certificates = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] ==", "No item found in contact_student table *-*') } else: #extract id for item", "### table = dynamodb.Table('Jobs') #scan for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody", "the line with super(...), # which wouldn't work (see my comment below), so...", "so... 
return (str(o) for o in [o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb',", "Add attributes from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills = [] db_response", "matching skills for placement') } else: for attribute in db_response['Items']: skills.append(attribute['name']) #add skills", "import Key,Attr from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o,", "[] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\": 500, \"body\":", "table ######################### table = dynamodb.Table('Skills_jobs') skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count']", "Benefits table ################# table = dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if", "= dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return", "in db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits'] = benefits #################### Add attributes", "skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\":", "table = dynamodb.Table('Benefits') benefits = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0:", "SINGLE PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements return { \"statusCode\": 
200, \"body\": json.dumps(responseBody)", "##################### Add attributes from Jobs TAble ########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID'])", "response['Count'] == 0: #no matching item print(\"No item was found :/\") return {", "return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context): queryStringParameters", "[] if response['Count'] == 0: #no matching item print(\"No item was found :/\")", "item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info']", "db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates'] = certificates ############ WE GOT ALL", "{ \"statusCode\": 500, 'body': json.dumps('Internal server error- No item found in contact_student table", "simple yield str(o) in the next line, # but that would mean a", "= 'us-east-2') def lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get company id Company_ID =", "No matching skills for placement') } else: for attribute in db_response['Items']: skills.append(attribute['name']) #add", "json.dumps('Internal server error- No item found in contact_student table *-*') } else: #extract", "#################### Add attributes from Benefits table ################# table = dynamodb.Table('Benefits') benefits = []", "item['ID'] #we got placement id placement = {} ##################### Add attributes from Jobs", "Error - No matching benefits for placement') } else: for attribute in db_response['Items']:", "boto3 import time import decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import ClientError", "certificates.append(attribute['name']) #add skills to placement placement['certificates'] = certificates 
############ WE GOT ALL ATTRIBUTES", "placement['skills'] = skills #################### Add attributes from Benefits table ################# table = dynamodb.Table('Benefits')", "matching item print(\"No item was found :/\") return { \"statusCode\": 500, 'body': json.dumps('Internal", "\"body\": json.dumps('Internal Server Error - No matching skills for placement') } else: for", "attribute in db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates'] = certificates ############ WE", "o in [o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def", "table = dynamodb.Table('Certificates_job') certificates = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0:", "= skills #################### Add attributes from Benefits table ################# table = dynamodb.Table('Benefits') benefits", "for placement') } else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to placement", "else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits'] = benefits", "0: print(\"No matching certificates for placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name']) #add", "} placements = [] if response['Count'] == 0: #no matching item print(\"No item", "= item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries'] =", "} else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to placement placement['benefits'] =", "PLACEMENT ################# placements.append(placement) responseBody['placements'] = placements return { \"statusCode\": 200, \"body\": json.dumps(responseBody) }", "else: 
#extract id for item in response['Items']: Placement_ID = item['ID'] #we got placement", "= table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No matching certificates for placement\") else: for", "= { } placements = [] if response['Count'] == 0: #no matching item", "} else: #extract id for item in response['Items']: Placement_ID = item['ID'] #we got", "db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\": 500, \"body\": json.dumps('Internal", "decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions import ClientError class DecimalEncoder(json.JSONEncoder): def default(self,", "= table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return { \"statusCode\": 500, \"body\": json.dumps('Internal Server", "Add attributes from Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates = [] db_response", "item['department_name'] placement['degree'] = item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes", "== 0: print(\"No matching certificates for placement\") else: for attribute in db_response['Items']: certificates.append(attribute['name'])", "= item['Company_name'] placement['title'] = item['title'] placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info'] =", "skills to placement placement['benefits'] = benefits #################### Add attributes from Certificates_job table #################", "import json import boto3 import time import decimal from boto3.dynamodb.conditions import Key,Attr from", "time import decimal from boto3.dynamodb.conditions import Key,Attr from botocore.exceptions 
import ClientError class DecimalEncoder(json.JSONEncoder):", "item found in contact_student table *-*') } else: #extract id for item in", "= item['degree'] placement['location'] = item['location'] placement['salaries'] = int(item['salaries']) ################3 Add attributes from Skills_jobs", "skills to placement placement['skills'] = skills #################### Add attributes from Benefits table #################", "#################### Add attributes from Certificates_job table ################# table = dynamodb.Table('Certificates_job') certificates = []", "Server Error - No matching skills for placement') } else: for attribute in", "placement') } else: for attribute in db_response['Items']: skills.append(attribute['name']) #add skills to placement placement['skills']", "which wouldn't work (see my comment below), so... return (str(o) for o in", "<gh_stars>0 import json import boto3 import time import decimal from boto3.dynamodb.conditions import Key,Attr", "json.dumps('Internal Server Error - No matching skills for placement') } else: for attribute", "= certificates ############ WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT ################# placements.append(placement)", "CONTACT ### table = dynamodb.Table('Jobs') #scan for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID))", "student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements = [] if", "a simple yield str(o) in the next line, # but that would mean", "#no matching item print(\"No item was found :/\") return { \"statusCode\": 500, 'body':", "dynamodb.Table('Skills_jobs') skills = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: return {", "table ################# table = dynamodb.Table('Certificates_job') certificates = [] db_response = 
table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count']", "work (see my comment below), so... return (str(o) for o in [o]) return", "No matching benefits for placement') } else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add", "(str(o) for o in [o]) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name =", "in the next line, # but that would mean a yield on the", "benefits for placement') } else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add skills to", "########################## placement['Company_ID'] = int(item['Company_ID']) placement['Placement_ID'] = int(item['ID']) placement['Company_name'] = item['Company_name'] placement['title'] = item['title']", "placement placement['certificates'] = certificates ############ WE GOT ALL ATTRIBUTES FOR A SINGLE PLACEMENT", "item was found :/\") return { \"statusCode\": 500, 'body': json.dumps('Internal server error- No", "dynamodb = boto3.resource('dynamodb', region_name = 'us-east-2') def lambda_handler(event, context): queryStringParameters = event['queryStringParameters'] #get", "to placement placement['benefits'] = benefits #################### Add attributes from Certificates_job table ################# table", "wanted a simple yield str(o) in the next line, # but that would", "for attribute in db_response['Items']: certificates.append(attribute['name']) #add skills to placement placement['certificates'] = certificates ############", "= dynamodb.Table('Certificates_job') certificates = [] db_response = table.scan(FilterExpression=Attr('Jobs_Company_ID').eq(Company_ID)&Attr('Jobs_ID').eq(Placement_ID)) if db_response['Count'] == 0: print(\"No", "decimal.Decimal): # wanted a simple yield str(o) in the next line, # but", "matching benefits for placement') } else: for attribute in db_response['Items']: benefits.append(attribute['name']) #add 
skills", "id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody = { } placements = [] if response['Count']", "benefits.append(attribute['name']) #add skills to placement placement['benefits'] = benefits #################### Add attributes from Certificates_job", "table = dynamodb.Table('Jobs') #scan for given student id response = table.scan(FilterExpression=Attr('Company_ID').eq(Company_ID)) responseBody =", "import ClientError class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): # wanted a", "Error - No matching skills for placement') } else: for attribute in db_response['Items']:", "= item['title'] placement['description'] = item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] =", "placements = [] if response['Count'] == 0: #no matching item print(\"No item was", "= item['description'] placement['category'] = item['category'] placement['contact_info'] = item['contact_info'] placement['department_name'] = item['department_name'] placement['degree'] =", "super(...), # which wouldn't work (see my comment below), so... return (str(o) for", "= int(item['salaries']) ################3 Add attributes from Skills_jobs table ######################### table = dynamodb.Table('Skills_jobs') skills" ]
[ "imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK, client_secret 为官网获取的SK", "# print(params['image']) request_url = request_url + \"?access_token=\" + access_token headers = {'content-type': 'application/json'}", "base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image", "np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return [] def FaceExtract(img:", "= requests.post(request_url, data=params, headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] !=", "requests.get(host) if response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype", "# imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result", "params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params + '}' # print(params['image'])", "[] for num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width =", "= 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url", "= base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params =", 
"wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath,", "== 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data']", "+ pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0,", "+ \"?access_token=\" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers)", "'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name", "pos[1] rightbottomx = lefttopx + pos[2] rightbottomy = lefttopy + pos[3] # print(lefttopx,", "import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url: resp =", "imgpos == 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image)", "= FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 'Local', 'Search') # cv2.imshow('image', result) #", "== 'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num = 0", "rightbottomy = lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy),", "if imgtype == 'Local': with open(img, \"rb\") as f: # 转为二进制格式 base64_data =", "'FacesNum': num, 'FullFace': None} return message if __name__ == \"__main__\": # imgpath =", "'FaceDetector')) import requests import base64 import cv2 import numpy as np import urllib.request", "# 使用base64进行加密 base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' +", "num = 0 for pos in face_List: lefttopx = pos[0] lefttopy = 
pos[1]", "image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return [] def FaceExtract(img: str, imgtype:", "= requests.get(host) if response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if", "cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2) if imgpos == 'Example':", "# client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response:", "client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: #", "search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code']", "'{' + params + '}' # print(params['image']) request_url = request_url + \"?access_token=\" +", "'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 'Local', 'Search')", "= response.json()['result']['face_num'] face_List = [] for num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top", "response = requests.post(request_url, data=params, headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code']", "if response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype ==", "# cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', 
image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code'] =", "as np import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url:", "response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = [] for num in range(face_number): face_loc_left =", "facenum=120): # 该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK, client_secret 为官网获取的SK host =", "= \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img, \"rb\") as f: # 转为二进制格式", "print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code'] message['Error", "'.jpg') elif imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath =", "full_face_path} elif imgpos == 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') #", "return [] def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message", "1 message['Error Code'] = response.json()['error_code'] message['Error Message'] = message['Error Message'] = response.json()['error_msg'] if", "\"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img, \"rb\") as f: # 转为二进制格式 base64_data", "os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy,", "= int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype", "return 
message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = [] for", "print(response.json()['access_token']) access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img,", "if __name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath =", "numpy as np import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if", "lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy),", "= request_url + \"?access_token=\" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url,", "'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path}", "imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result =", "'example_face_' + str(num) + '.jpg') elif imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy),", "face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local': image = cv2.imread(img) elif imgtype ==", "if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image,", "lefttopx:rightbottomx]) num += 1 message['Error Code'] = response.json()['error_code'] message['Error Message'] = message['Error Message']", "255, 0), 2) if imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_'", "params = 
\"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params + '}' elif imgtype", "search_all_path = [] num = 0 for pos in face_List: lefttopx = pos[0]", "pos[0] lefttopy = pos[1] rightbottomx = lefttopx + pos[2] rightbottomy = lefttopy +", "= {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos == 'Search': # full_face_path", "= cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return [] def FaceExtract(img: str, imgtype: str,", "+ params + '}' elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params", "client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: # print(response.json()['access_token']) access_token", "= '{' + params + '}' elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\",", "num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height", "fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()),", "if response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code'] =", "num, 'FullFace': full_face_path} elif imgpos == 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace',", "'.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, 
image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error", "base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params +", "response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img, \"rb\") as f:", "'{' + params + '}' elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img)", "response.json()['error_msg'] if imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image)", "request_url = request_url + \"?access_token=\" + access_token headers = {'content-type': 'application/json'} response =", "response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code']", "== 'Local': image = cv2.imread(img) elif imgtype == 'url': image = fetchImageFromHttp(img) #", "该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image =", "# 该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>'", "face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local':", "int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local': image =", "urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), 
dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else:", "encoding:utf-8 import os, sys basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64", "imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic',", "= pos[1] rightbottomx = lefttopx + pos[2] rightbottomy = lefttopy + pos[3] #", "for num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width'])", "\"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params + '}' elif imgtype == 'url':", "base64 import cv2 import numpy as np import urllib.request import base64 def fetchImageFromHttp(image_url,", "'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return", "lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code'] = response.json()['error_code'] message['Error Message']", "str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg',", "message['Data'] = None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List", "sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import cv2 import numpy as np import", 
"'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url')", "with open(img, \"rb\") as f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data", "params + '}' elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params =", "= lefttopx + pos[2] rightbottomy = lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx,", "= response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img, \"rb\") as", "= response.json()['error_msg'] if imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path,", "{'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos == 'Search': # full_face_path =", "str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) #", "imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') #", "(rightbottomx, rightbottomy), (0, 255, 0), 2) if imgpos == 'Example': savepath = os.path.join(basepath,", "return message if __name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2'", "headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response: print (response.json())", "# 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code'] message['Error Message'] =", 
"(response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code'] message['Error Message']", "'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params + '}' #", "'Local': with open(img, \"rb\") as f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密", "提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg']", "+ '}' # print(params['image']) request_url = request_url + \"?access_token=\" + access_token headers =", "# raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = [] for num in", "fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num = 0 for pos in face_List:", "= 0 for pos in face_List: lefttopx = pos[0] lefttopy = pos[1] rightbottomx", "image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR)", "= int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local': image", "int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width,", "str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) # 
cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy,", "cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code'] = response.json()['error_code'] message['Error Message'] =", "elif imgpos == 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path,", "data=params, headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error", "== 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces':", "FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 'Local', 'Search') # cv2.imshow('image', result) # cv2.waitKey(0)", "image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return message if __name__", "import numpy as np import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片", "in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height =", "= 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath,", "request_url + \"?access_token=\" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params,", "\\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params + '}' # print(params['image']) request_url = request_url", "转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params = 
\"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data)", "requests import base64 import cv2 import numpy as np import urllib.request import base64", "params = '{' + params + '}' elif imgtype == 'url': params =", "imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params +", "response.json()['error_code'] message['Error Message'] = message['Error Message'] = response.json()['error_msg'] if imgpos == 'Example': full_face_path", "image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos == 'Search':", "= base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params + '}'", "使用base64进行加密 base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params", "num, 'FullFace': None} return message if __name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg'", "= {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response: print (response.json()) #", "response = requests.get(host) if response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\"", "# cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return message", "rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2) if imgpos", "'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg') elif imgpos", "== 'Search': pos_name = 
','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace',", "+ access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response:", "open(img, \"rb\") as f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data =", "search_all_path, 'FacesNum': num, 'FullFace': None} return message if __name__ == \"__main__\": # imgpath", "imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] =", "request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img, \"rb\") as f: #", "cv2.imread(img) elif imgtype == 'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path = []", "params = '{' + params + '}' # print(params['image']) request_url = request_url +", "Message'] = response.json()['error_msg'] if imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg')", "lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2) if imgpos == 'Example': savepath =", "message if __name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath", "= lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx,", "= response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] = None return message # raise", "fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 'Local', 'Search') #", "== \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' 
fetchImageFromHttp(wycpath)", "return image else: return [] def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120):", "+ '.jpg') elif imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath", "(lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2) if imgpos == 'Example': savepath", "import base64 import cv2 import numpy as np import urllib.request import base64 def", "__name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg'", "'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] =", "np import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url: resp", "{'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息", "= ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg')", "image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return []", "str, imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message = {} # client_id", "0), 2) if imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' +", "access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with open(img, \"rb\")", "print(params['image']) request_url = request_url + \"?access_token=\" + access_token headers = {'content-type': 'application/json'} response", "int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = 
int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype ==", "= response.json()['error_code'] message['Error Message'] = message['Error Message'] = response.json()['error_msg'] if imgpos == 'Example':", "requests.post(request_url, data=params, headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0:", "= message['Error Message'] = response.json()['error_msg'] if imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic',", "os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num,", "'ExampleFace', 'example_face_' + str(num) + '.jpg') elif imgpos == 'Search': pos_name = ','.join([str(lefttopx),", "cv2.IMREAD_COLOR) return image else: return [] def FaceExtract(img: str, imgtype: str, imgpos: str,", "urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url,", "'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None}", "!= 0: message['Error Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] = None", "base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params + '}' elif", "为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: # print(response.json()['access_token'])", "图片编号起始 search_all_path = [] num = 0 for pos in face_List: lefttopx =", "raise 
Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = [] for num in range(face_number):", "message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos == 'Search': #", "'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg') elif imgpos == 'Search': pos_name =", "str, facenum=120): # 该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK, client_secret 为官网获取的SK host", "+ pos[2] rightbottomy = lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image,", "str(num) + '.jpg') elif imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)])", "image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code'] = response.json()['error_code'] message['Error Message'] = message['Error", "'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos", "= [] for num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width", "pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255,", "0 for pos in face_List: lefttopx = pos[0] lefttopy = pos[1] rightbottomx =", "dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return [] def FaceExtract(img: str,", "\"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params + '}' # print(params['image']) request_url =", "= 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 
'Local',", "= np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return [] def", "cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code'] = response.json()['error_code']", "Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = [] for num in range(face_number): face_loc_left", "= response.json()['error_msg'] message['Data'] = None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number =", "os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import cv2 import numpy as np", "face_List: lefttopx = pos[0] lefttopy = pos[1] rightbottomx = lefttopx + pos[2] rightbottomy", "face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local': image = cv2.imread(img) elif imgtype", "# 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\",", "{'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return message if __name__ == \"__main__\": #", "\\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params + '}' elif imgtype == 'url': params", "= os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num,", "= urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image", "'application/json'} response = 
requests.post(request_url, data=params, headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息 if", "str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK, client_secret", "import os, sys basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import", "= cv2.imread(img) elif imgtype == 'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path =", "response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] =", "message['Error Code'] = response.json()['error_code'] message['Error Message'] = message['Error Message'] = response.json()['error_msg'] if imgpos", "\"rb\") as f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode()", "pos[2] rightbottomy = lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx,", "[] num = 0 for pos in face_List: lefttopx = pos[0] lefttopy =", "lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2) if", "= [] num = 0 for pos in face_List: lefttopx = pos[0] lefttopy", "message['Error Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] = None return message", "face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local': image = cv2.imread(img)", "full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum':", "'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif", "# print(response.json()['access_token']) 
access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local': with", "result = FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 'Local', 'Search') # cv2.imshow('image', result)", "if response.json()['error_code'] != 0: message['Error Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data']", "timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return", "f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params =", "这里是读取图像并画框 if imgtype == 'Local': image = cv2.imread(img) elif imgtype == 'url': image", "message['Error Message'] = response.json()['error_msg'] if imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace',", "'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num = 0 for", "imgtype == 'Local': with open(img, \"rb\") as f: # 转为二进制格式 base64_data = base64.b64encode(f.read())", "+= 1 message['Error Code'] = response.json()['error_code'] message['Error Message'] = message['Error Message'] = response.json()['error_msg']", "'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath,", "def fetchImageFromHttp(image_url, timeout_s=1): # 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image =", "'Local': image = cv2.imread(img) elif imgtype == 'url': image = fetchImageFromHttp(img) # 图片编号起始", "import cv2 import numpy as np import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1):", "if imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) +", "pos_name + '.jpg') search_all_path.append(savepath) # 
cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num +=", "os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg') elif imgpos == 'Search': pos_name", "Message'] = response.json()['error_msg'] message['Data'] = None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number", "message = {} # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response =", "(0, 255, 0), 2) if imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace',", "rightbottomy), (0, 255, 0), 2) if imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic',", "elif imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath,", "response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url = \"https://aip.baidubce.com/rest/2.0/face/v3/detect\" if imgtype == 'Local':", "response.json()['result']['face_num'] face_List = [] for num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top =", "face_number = response.json()['result']['face_num'] face_List = [] for num in range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left'])", "face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = 
int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left,", "= {} # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host)", "if imgpos == 'Example': full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data']", "== 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params + '}'", "if imgtype == 'Local': image = cv2.imread(img) elif imgtype == 'url': image =", "'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace':", "# 这里是读取图像并画框 if imgtype == 'Local': image = cv2.imread(img) elif imgtype == 'url':", "{} # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if", "+ '.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1", "face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if imgtype == 'Local': image = cv2.imread(img) elif", "'FullFace': full_face_path} elif imgpos == 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg')", "Message'] = message['Error Message'] = response.json()['error_msg'] if imgpos == 'Example': full_face_path = os.path.join(basepath,", "cv2.imdecode(image, cv2.IMREAD_COLOR) return image else: return [] def FaceExtract(img: str, imgtype: str, imgpos:", "import base64 def fetchImageFromHttp(image_url, 
timeout_s=1): # 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s)", "= \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params + '}' # print(params['image']) request_url", "'FullFace': None} return message if __name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath", "'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: # print(response.json()['access_token']) access_token = response.json()['access_token'] request_url =", "face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框 if", "\"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath = 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) #", "# 图片编号起始 search_all_path = [] num = 0 for pos in face_List: lefttopx", "= fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num = 0 for pos in", "0: message['Error Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] = None return", "response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] = None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'],", "response.json()['error_msg'] message['Data'] = None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num']", "image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, 
image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num += 1 message['Error Code'] = response.json()['error_code'] message['Error", "# 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image", "# result = FaceExtract(imgpath, 'url') # result = FaceExtract(imgpath, 'Local', 'Search') # cv2.imshow('image',", "2) if imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num)", "cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos ==", "Code'] = response.json()['error_code'] message['Error Message'] = response.json()['error_msg'] message['Data'] = None return message #", "= None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List =", "= \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{' + params + '}' elif imgtype ==", "lefttopx + pos[2] rightbottomy = lefttopy + pos[3] # print(lefttopx, lefttopy, rightbottomx, rightbottomy)", "else: return [] def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸", "Code'] = response.json()['error_code'] message['Error Message'] = message['Error Message'] = response.json()['error_msg'] if imgpos ==", "'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx]) num", "os, sys basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import cv2", "为官网获取的SK host = 
'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: # print(response.json()['access_token']) access_token =", "imgtype == 'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num =", "'}' # print(params['image']) request_url = request_url + \"?access_token=\" + access_token headers = {'content-type':", "lefttopy = pos[1] rightbottomx = lefttopx + pos[2] rightbottomy = lefttopy + pos[3]", "rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2) if imgpos ==", "= os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import cv2 import numpy as", "+ params + '}' # print(params['image']) request_url = request_url + \"?access_token=\" + access_token", "pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name +", "'}' elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' +", "None} return message if __name__ == \"__main__\": # imgpath = 'http://xinan.ziqiang.net.cn/ThreeFace.jpeg' imgpath =", "= pos[0] lefttopy = pos[1] rightbottomx = lefttopx + pos[2] rightbottomy = lefttopy", "imgpos == 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg')", "print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0), 2)", "# full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces':", "savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') 
search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx])", "elif imgtype == 'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num", "# print(lefttopx, lefttopy, rightbottomx, rightbottomy) cv2.rectangle(image, (lefttopx, lefttopy), (rightbottomx, rightbottomy), (0, 255, 0),", "imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK,", "lefttopx = pos[0] lefttopy = pos[1] rightbottomx = lefttopx + pos[2] rightbottomy =", "该函数的作用是提取图中人脸 message = {} # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response", "elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{' + params", "== 'Local': with open(img, \"rb\") as f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) #", "num += 1 message['Error Code'] = response.json()['error_code'] message['Error Message'] = message['Error Message'] =", "','.join([str(lefttopx), str(lefttopy), str(rightbottomx), str(rightbottomy)]) savepath = os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath)", "savepath, 'FacesNum': num, 'FullFace': full_face_path} elif imgpos == 'Search': # full_face_path = os.path.join(basepath,", "= int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top,", "\"?access_token=\" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, 
data=params, headers=headers) if", "+ str(num) + '.jpg') elif imgpos == 'Search': pos_name = ','.join([str(lefttopx), str(lefttopy), str(rightbottomx),", "def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message = {}", "message['Error Message'] = message['Error Message'] = response.json()['error_msg'] if imgpos == 'Example': full_face_path =", "resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\") image = cv2.imdecode(image, cv2.IMREAD_COLOR) return", "full_face_path = os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path,", "= os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg') elif imgpos == 'Search':", "= os.path.join(basepath, 'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath,", "'FaceStatic', 'SearchFace', pos_name + '.jpg') search_all_path.append(savepath) # cv2.imwrite(\"C:/WorkSpace/test/detect_face_\"+str(num)+'.jpg', image[lefttopy:rightbottomy, lefttopx:rightbottomx]) cv2.imwrite(savepath, image[lefttopy:rightbottomy, lefttopx:rightbottomx])", "image = fetchImageFromHttp(img) # 图片编号起始 search_all_path = [] num = 0 for pos", "== 'Example': savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg') elif", "message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return message if __name__ ==", "= os.path.join(basepath, 'FaceStatic', 'FullFace', 'Search.jpg') # cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum':", "[] def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message =", "message # raise 
Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = [] for num", "None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg'])) face_number = response.json()['result']['face_num'] face_List = []", "image else: return [] def FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120): #", "in face_List: lefttopx = pos[0] lefttopy = pos[1] rightbottomx = lefttopx + pos[2]", "message['Error Message'] = response.json()['error_msg'] message['Data'] = None return message # raise Exception('人脸检测失败,失败码为{},失败信息为:{}'.format(response.json()['error_code'], response.json()['error_msg']))", "FaceExtract(img: str, imgtype: str, imgpos: str, facenum=120): # 该函数的作用是提取图中人脸 message = {} #", "= '{' + params + '}' # print(params['image']) request_url = request_url + \"?access_token=\"", "sys basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import cv2 import", "'FacesNum': num, 'FullFace': full_face_path} elif imgpos == 'Search': # full_face_path = os.path.join(basepath, 'FaceStatic',", "pos in face_List: lefttopx = pos[0] lefttopy = pos[1] rightbottomx = lefttopx +", "os.path.join(basepath, 'FaceStatic', 'FullFace', 'Result.jpg') cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': savepath, 'FacesNum': num, 'FullFace':", "headers=headers) if response: print (response.json()) # 提取检测到的所有人脸信息 if response.json()['error_code'] != 0: message['Error Code']", "import requests import base64 import cv2 import numpy as np import urllib.request import", "basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import base64 import cv2 import numpy", "face_List = [] for num in range(face_number): 
face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = int(response.json()['result']['face_list'][num]['location']['top'])", "# encoding:utf-8 import os, sys basepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(basepath, 'FaceDetector')) import requests import", "imgtype == 'Local': image = cv2.imread(img) elif imgtype == 'url': image = fetchImageFromHttp(img)", "= int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) #", "+ '}' elif imgtype == 'url': params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"URL\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(img) params = '{'", "= 'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') # result", "as f: # 转为二进制格式 base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params", "= {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return message if __name__ == \"__main__\":", "base64_data = base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params", "savepath = os.path.join(basepath, 'FaceStatic', 'ExampleFace', 'example_face_' + str(num) + '.jpg') elif imgpos ==", "access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response: print", "range(face_number): face_loc_left = int(response.json()['result']['face_list'][num]['location']['left']) face_loc_top = 
int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height'])", "timeout_s=1): # 该函数是读取url图片 if image_url: resp = urllib.request.urlopen(image_url, timeout=timeout_s) image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")", "host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=DXN8o5eNaheZahxK558I8GOs&client_secret=<KEY>' response = requests.get(host) if response: # print(response.json()['access_token']) access_token = response.json()['access_token']", "params + '}' # print(params['image']) request_url = request_url + \"?access_token=\" + access_token headers", "image = cv2.imread(img) elif imgtype == 'url': image = fetchImageFromHttp(img) # 图片编号起始 search_all_path", "face_loc_top = int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height])", "rightbottomx = lefttopx + pos[2] rightbottomy = lefttopy + pos[3] # print(lefttopx, lefttopy,", "cv2.imwrite(full_face_path, image) message['Data'] = {'ExampleFaces': search_all_path, 'FacesNum': num, 'FullFace': None} return message if", "cv2 import numpy as np import urllib.request import base64 def fetchImageFromHttp(image_url, timeout_s=1): #", "for pos in face_List: lefttopx = pos[0] lefttopy = pos[1] rightbottomx = lefttopx", "base64.b64encode(f.read()) # 使用base64进行加密 base64_data = base64_data.decode() params = \"\\\"image\\\":\\\"{}\\\",\\\"image_type\\\":\\\"BASE64\\\", \\\"max_face_num\\\":\\\"120\\\"\".format(base64_data) params = '{'", "'http://xinan.ziqiang.net.cn/Fq-PpUCF25C61q0muvXAHCok0uK2' wycpath = 
'http://xinan.ziqiang.net.cn/AsFace.jpg' fetchImageFromHttp(wycpath) # result = FaceExtract(imgpath, 'url') # result =", "int(response.json()['result']['face_list'][num]['location']['top']) face_loc_width = int(response.json()['result']['face_list'][num]['location']['width']) face_loc_height = int(response.json()['result']['face_list'][num]['location']['height']) face_List.append([face_loc_left, face_loc_top, face_loc_width, face_loc_height]) # 这里是读取图像并画框" ]
[ "# P = 1000 r = 2.5 # % n = 4 #", "*= (1 + r/100/n) # r/100 since r is in % print('Amount (", "4 # quarterly y = 5 amount = P print('Amount (Starting):', amount) for", "principal # r - annual rate (%) # n - compounding frequency #", "** (ny) # P = 1000 r = 2.5 # % n =", "= 1000 r = 2.5 # % n = 4 # quarterly y", "r is in % print('Amount ( year:', year, ', period:', period, ') :',", "in % print('Amount ( year:', year, ', period:', period, ') :', round(amount, 2))", "y - number of years # # Py = P0 (1 + r/n)", "- annual rate (%) # n - compounding frequency # y - number", "compounding frequency # y - number of years # # Py = P0", "annual rate (%) # n - compounding frequency # y - number of", "P print('Amount (Starting):', amount) for year in range(1, y+1): for period in range(1,", "= P print('Amount (Starting):', amount) for year in range(1, y+1): for period in", "# y - number of years # # Py = P0 (1 +", "compound interest # # P - principal # r - annual rate (%)", "r = 2.5 # % n = 4 # quarterly y = 5", "# r/100 since r is in % print('Amount ( year:', year, ', period:',", "P - principal # r - annual rate (%) # n - compounding", "- number of years # # Py = P0 (1 + r/n) **", "# quarterly y = 5 amount = P print('Amount (Starting):', amount) for year", "(%) # n - compounding frequency # y - number of years #", "# % n = 4 # quarterly y = 5 amount = P", "for year in range(1, y+1): for period in range(1, n+1): amount *= (1", "r/100 since r is in % print('Amount ( year:', year, ', period:', period,", "amount) for year in range(1, y+1): for period in range(1, n+1): amount *=", "number of years # # Py = P0 (1 + r/n) ** (ny)", "P = 1000 r = 2.5 # % n = 4 # quarterly", "= 4 # quarterly y = 5 amount = P print('Amount (Starting):', amount)", "y = 5 amount = P print('Amount (Starting):', amount) for year in range(1,", "print('Amount (Starting):', amount) for year in range(1, y+1): for period in range(1, n+1):", "# # compound interest # # P - principal # r - annual", "amount = P 
print('Amount (Starting):', amount) for year in range(1, y+1): for period", "quarterly y = 5 amount = P print('Amount (Starting):', amount) for year in", "in range(1, n+1): amount *= (1 + r/100/n) # r/100 since r is", "# n - compounding frequency # y - number of years # #", "# Py = P0 (1 + r/n) ** (ny) # P = 1000", "n+1): amount *= (1 + r/100/n) # r/100 since r is in %", "r - annual rate (%) # n - compounding frequency # y -", "# # P - principal # r - annual rate (%) # n", "Py = P0 (1 + r/n) ** (ny) # P = 1000 r", "interest # # P - principal # r - annual rate (%) #", "- compounding frequency # y - number of years # # Py =", "P0 (1 + r/n) ** (ny) # P = 1000 r = 2.5", "# compound interest # # P - principal # r - annual rate", "range(1, y+1): for period in range(1, n+1): amount *= (1 + r/100/n) #", "years # # Py = P0 (1 + r/n) ** (ny) # P", "rate (%) # n - compounding frequency # y - number of years", "2.5 # % n = 4 # quarterly y = 5 amount =", "# # Py = P0 (1 + r/n) ** (ny) # P =", "amount *= (1 + r/100/n) # r/100 since r is in % print('Amount", "is in % print('Amount ( year:', year, ', period:', period, ') :', round(amount,", "= 2.5 # % n = 4 # quarterly y = 5 amount", "n - compounding frequency # y - number of years # # Py", "n = 4 # quarterly y = 5 amount = P print('Amount (Starting):',", "r/100/n) # r/100 since r is in % print('Amount ( year:', year, ',", "(1 + r/100/n) # r/100 since r is in % print('Amount ( year:',", "r/n) ** (ny) # P = 1000 r = 2.5 # % n", "range(1, n+1): amount *= (1 + r/100/n) # r/100 since r is in", "in range(1, y+1): for period in range(1, n+1): amount *= (1 + r/100/n)", "(ny) # P = 1000 r = 2.5 # % n = 4", "(Starting):', amount) for year in range(1, y+1): for period in range(1, n+1): amount", "# P - principal # r - annual rate (%) # n -", "(1 + r/n) ** (ny) # P = 1000 r = 2.5 #", "period in range(1, n+1): amount *= (1 + r/100/n) # r/100 since r", "frequency # y - number of years # # Py = P0 (1", "since r is in % print('Amount ( year:', 
year, ', period:', period, ')", "# r - annual rate (%) # n - compounding frequency # y", "5 amount = P print('Amount (Starting):', amount) for year in range(1, y+1): for", "+ r/100/n) # r/100 since r is in % print('Amount ( year:', year,", "% n = 4 # quarterly y = 5 amount = P print('Amount", "of years # # Py = P0 (1 + r/n) ** (ny) #", "= 5 amount = P print('Amount (Starting):', amount) for year in range(1, y+1):", "1000 r = 2.5 # % n = 4 # quarterly y =", "y+1): for period in range(1, n+1): amount *= (1 + r/100/n) # r/100", "- principal # r - annual rate (%) # n - compounding frequency", "for period in range(1, n+1): amount *= (1 + r/100/n) # r/100 since", "year in range(1, y+1): for period in range(1, n+1): amount *= (1 +", "+ r/n) ** (ny) # P = 1000 r = 2.5 # %", "= P0 (1 + r/n) ** (ny) # P = 1000 r =" ]
[ "identifying 'cache_key' property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs):", "CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for updating a model instance in the", "from django.core.cache import cache from .settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal", "def cache_evict(sender, **kwargs): \"\"\" signal for updating a model instance in the cache;", "property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal", "item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for updating a model instance in", "django.core.cache import cache from .settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for", "must have a uniquely identifying 'cache_key' property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item,", "item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for updating", "cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for updating a model instance", "signal must have a uniquely identifying 'cache_key' property. \"\"\" item = kwargs.get('instance') cache.delete(item.cache_key)", "this signal must have a uniquely identifying 'cache_key' property. \"\"\" item = kwargs.get('instance')", "\"\"\" signal for updating a model instance in the cache; any Model class", "the cache; any Model class using this signal must have a uniquely identifying", "using this signal must have a uniquely identifying 'cache_key' property. 
\"\"\" item =", "for updating a model instance in the cache; any Model class using this", "kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for updating a model", "**kwargs): \"\"\" signal for updating a model instance in the cache; any Model", "a model instance in the cache; any Model class using this signal must", "in the cache; any Model class using this signal must have a uniquely", "import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for updating a model instance in", "instance in the cache; any Model class using this signal must have a", "cache from .settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for updating a", "signal for updating a model instance in the cache; any Model class using", "any Model class using this signal must have a uniquely identifying 'cache_key' property.", "updating a model instance in the cache; any Model class using this signal", "class using this signal must have a uniquely identifying 'cache_key' property. \"\"\" item", "= kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for updating a", "def cache_update(sender, **kwargs): \"\"\" signal for updating a model instance in the cache;", "\"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for", "uniquely identifying 'cache_key' property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender,", "<filename>ecomstore/caching.py from django.core.cache import cache from .settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\"", "signal must have a uniquely identifying 'cache_key' property. 
\"\"\" item = kwargs.get('instance') cache.set(item.cache_key,", "cache_update(sender, **kwargs): \"\"\" signal for updating a model instance in the cache; any", "a uniquely identifying 'cache_key' property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def", ".settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for updating a model instance", "cache; any Model class using this signal must have a uniquely identifying 'cache_key'", "'cache_key' property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\"", "CACHE_TIMEOUT) def cache_evict(sender, **kwargs): \"\"\" signal for updating a model instance in the", "cache_evict(sender, **kwargs): \"\"\" signal for updating a model instance in the cache; any", "model instance in the cache; any Model class using this signal must have", "Model class using this signal must have a uniquely identifying 'cache_key' property. \"\"\"", "import cache from .settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for updating", "have a uniquely identifying 'cache_key' property. \"\"\" item = kwargs.get('instance') cache.set(item.cache_key, item, CACHE_TIMEOUT)", "from .settings import CACHE_TIMEOUT def cache_update(sender, **kwargs): \"\"\" signal for updating a model" ]
[ "'SHOW'} {setting};\" prefix = '' suffix = '' try: res = await conn.fetchval(q)", "} key_len = max((len(v) for v in settings)) for setting, validator in settings.items():", "prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\") results[setting] = res return", "for v in settings)) for setting, validator in settings.items(): q = f\"{'SELECT' if", "validator in settings.items(): q = f\"{'SELECT' if '(' in setting else 'SHOW'} {setting};\"", "msg = validator(res) if msg: prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError,", "in settings.items(): q = f\"{'SELECT' if '(' in setting else 'SHOW'} {setting};\" prefix", "= '' try: res = await conn.fetchval(q) if validator: msg = validator(res) if", "try: res = await conn.fetchval(q) if validator: msg = validator(res) if msg: prefix,", "complex queries' if v != 'off' else '', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem':", "in PG 11-12 for complex queries' if v != 'off' else '', 'shared_buffers':", "PG 11-12 for complex queries' if v != 'off' else '', 'shared_buffers': None,", "for complex queries' if v != 'off' else '', 'shared_buffers': None, 'work_mem': None,", "COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version = False results", "None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v) for v in settings))", "-> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings =", "'max_parallel_workers_per_gather': None, } key_len = max((len(v) for v in settings)) for setting, validator", "as ex: res = ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} =", "= { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT in PG", "queries' if v != 'off' else '', 'shared_buffers': None, 'work_mem': None, 
'maintenance_work_mem': None,", "asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async def show_settings(conn: Connection)", "from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async def show_settings(conn:", "Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async", "postgis_version = float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v:", "'' try: res = await conn.fetchval(q) if validator: msg = validator(res) if msg:", "None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v)", "{} def parse_postgis_ver(value) -> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version =", "'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v) for", "postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings = { 'version()': None,", "= COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\") results[setting] = res return results, postgis_version", "bool]: postgis_version = False results = {} def parse_postgis_ver(value) -> None: nonlocal postgis_version", "q = f\"{'SELECT' if '(' in setting else 'SHOW'} {setting};\" prefix = ''", "JIT in PG 11-12 for complex queries' if v != 'off' else '',", "11-12 for complex queries' if v != 'off' else '', 'shared_buffers': None, 'work_mem':", "= ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\") results[setting] =", "for setting, validator in settings.items(): q = f\"{'SELECT' if '(' in setting else", "re from typing 
import Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from", "None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None,", "show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version = False results = {} def", "'disable JIT in PG 11-12 for complex queries' if v != 'off' else", "import Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR", "UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str,", "results = {} def parse_postgis_ver(value) -> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value)", "suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message", "None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v) for v in settings)) for setting,", "= {} def parse_postgis_ver(value) -> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version", "= '' suffix = '' try: res = await conn.fetchval(q) if validator: msg", "Tuple[Dict[str, str], bool]: postgis_version = False results = {} def parse_postgis_ver(value) -> None:", "= COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message prefix,", "None, } key_len = max((len(v) for v in settings)) for setting, validator in", "float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT", "False results = {} def parse_postgis_ver(value) -> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)',", "= max((len(v) for v in settings)) for setting, validator in settings.items(): q =", 
"UndefinedObjectError) as ex: res = ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}}", "import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async def show_settings(conn: Connection) ->", "from openmaptiles.perfutils import COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version", "'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, }", "import COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version = False", "Connection) -> Tuple[Dict[str, str], bool]: postgis_version = False results = {} def parse_postgis_ver(value)", "Connection from openmaptiles.perfutils import COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]:", "if msg: prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex:", "parse_postgis_ver(value) -> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings", "= f\"{'SELECT' if '(' in setting else 'SHOW'} {setting};\" prefix = '' suffix", "msg: prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res", "nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings = { 'version()':", "ex: res = ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\")", "openmaptiles.perfutils import COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version =", "{ 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT in PG 11-12", "res = await conn.fetchval(q) if validator: msg = validator(res) if 
msg: prefix, suffix", "value) postgis_version = float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda", "'jit': lambda v: 'disable JIT in PG 11-12 for complex queries' if v", "except (UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message prefix, suffix = COLOR.RED, COLOR.RESET", "'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather':", "'off' else '', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None,", "m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()':", "{msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message prefix, suffix = COLOR.RED,", "prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res =", "in settings)) for setting, validator in settings.items(): q = f\"{'SELECT' if '(' in", "'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT in PG 11-12 for complex queries'", "ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\") results[setting] = res", "prefix = '' suffix = '' try: res = await conn.fetchval(q) if validator:", "= validator(res) if msg: prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError)", "validator: msg = validator(res) if msg: prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except", "typing import Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import", "{setting};\" prefix = '' suffix = '' try: res = await conn.fetchval(q) if", 
"<reponame>ldgeo/openmaptiles-tools<filename>openmaptiles/pgutils.py import re from typing import Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError,", "setting else 'SHOW'} {setting};\" prefix = '' suffix = '' try: res =", "async def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version = False results =", "if '(' in setting else 'SHOW'} {setting};\" prefix = '' suffix = ''", "settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT in", "None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v) for v", "v: 'disable JIT in PG 11-12 for complex queries' if v != 'off'", "validator(res) if msg: prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as", "COLOR.RED, f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message prefix, suffix", "re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit':", "v in settings)) for setting, validator in settings.items(): q = f\"{'SELECT' if '('", "conn.fetchval(q) if validator: msg = validator(res) if msg: prefix, suffix = COLOR.RED, f\"", "'(' in setting else 'SHOW'} {setting};\" prefix = '' suffix = '' try:", "else '', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers':", "-> Tuple[Dict[str, str], bool]: postgis_version = False results = {} def parse_postgis_ver(value) ->", "None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len", "key_len = max((len(v) for v in settings)) for setting, validator in settings.items(): q", "suffix = COLOR.RED, 
COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\") results[setting] = res return results,", "'', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None,", "def show_settings(conn: Connection) -> Tuple[Dict[str, str], bool]: postgis_version = False results = {}", "'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v) for v in", "= await conn.fetchval(q) if validator: msg = validator(res) if msg: prefix, suffix =", "'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT in PG 11-12 for", "= False results = {} def parse_postgis_ver(value) -> None: nonlocal postgis_version m =", "'' suffix = '' try: res = await conn.fetchval(q) if validator: msg =", "res = ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"* {setting:{key_len}} = {prefix}{res}{suffix}\") results[setting]", "None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable JIT in PG 11-12 for complex", "max((len(v) for v in settings)) for setting, validator in settings.items(): q = f\"{'SELECT'", "(UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message prefix, suffix = COLOR.RED, COLOR.RESET print(f\"*", "from typing import Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils", "str], bool]: postgis_version = False results = {} def parse_postgis_ver(value) -> None: nonlocal", "def parse_postgis_ver(value) -> None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1))", "UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async def show_settings(conn: Connection) -> Tuple[Dict[str, str],", "else 'SHOW'} {setting};\" prefix = '' suffix = '' try: res = await", "if validator: msg = validator(res) if msg: 
prefix, suffix = COLOR.RED, f\" {msg}{COLOR.RESET}\"", "!= 'off' else '', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes':", "postgis_version = False results = {} def parse_postgis_ver(value) -> None: nonlocal postgis_version m", "setting, validator in settings.items(): q = f\"{'SELECT' if '(' in setting else 'SHOW'}", "f\" {msg}{COLOR.RESET}\" except (UndefinedFunctionError, UndefinedObjectError) as ex: res = ex.message prefix, suffix =", "await conn.fetchval(q) if validator: msg = validator(res) if msg: prefix, suffix = COLOR.RED,", "v != 'off' else '', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections': None,", "import re from typing import Tuple, Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection", "if v != 'off' else '', 'shared_buffers': None, 'work_mem': None, 'maintenance_work_mem': None, 'max_connections':", "None: nonlocal postgis_version m = re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings = {", "suffix = '' try: res = await conn.fetchval(q) if validator: msg = validator(res)", "= re.match(r'POSTGIS=\"(\\d+\\.\\d+)', value) postgis_version = float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver,", "settings.items(): q = f\"{'SELECT' if '(' in setting else 'SHOW'} {setting};\" prefix =", "= float(m.group(1)) settings = { 'version()': None, 'postgis_full_version()': parse_postgis_ver, 'jit': lambda v: 'disable", "'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len = max((len(v) for v in settings)) for", "Dict from asyncpg import UndefinedFunctionError, UndefinedObjectError, Connection from openmaptiles.perfutils import COLOR async def", "settings)) for setting, validator in settings.items(): q = f\"{'SELECT' if '(' in setting", "in setting else 'SHOW'} {setting};\" prefix = '' suffix = '' try: res", 
"parse_postgis_ver, 'jit': lambda v: 'disable JIT in PG 11-12 for complex queries' if", "lambda v: 'disable JIT in PG 11-12 for complex queries' if v !=", "f\"{'SELECT' if '(' in setting else 'SHOW'} {setting};\" prefix = '' suffix =", "'maintenance_work_mem': None, 'max_connections': None, 'max_worker_processes': None, 'max_parallel_workers': None, 'max_parallel_workers_per_gather': None, } key_len =" ]
[ "17, 19) for i in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model =", "AxesSpec() model = Images(\"ccd\", axes=axes) assert model.figure is None figure = FigureSpec((axes,), title=\"\")", "= AxesSpec() model = Images(\"ccd\", axes=axes) assert model.figure is None figure = FigureSpec((axes,),", "13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images", "\"Test Images with a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model =", "accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c * ccd\", namespace={\"c\": 3})", "13, 17, 19) for i in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model", "== {\"c\": 3} assert model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec()", "higher-dimensional arrays.\" dims = (5, 7, 11, 13, 17, 19) for i in", "view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\", axes=axes) assert model.figure is", "= Images(\"ccd\", axes=axes) assert model.figure is None figure = FigureSpec((axes,), title=\"\") assert model.figure", "run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c * ccd\", namespace={\"c\": 3}) view", "model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims", "run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def", "numpy.random.random((11, 13))}) model = Images(\"c * ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run)", "axes = AxesSpec() model = Images(\"ccd\", axes=axes) assert 
model.figure is None figure =", "dims = (5, 7, 11, 13, 17, 19) for i in range(3, len(dims)):", "= build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties():", "model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims =", "<gh_stars>0 from bluesky_live.run_builder import build_simple_run import numpy from ..plot_builders import Images from ..plot_specs", "is run assert model.field == \"c * ccd\" assert dict(model.namespace) == {\"c\": 3}", "in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure)", "import Images from ..plot_specs import AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure def test_image():", "model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims = (5, 7,", "7, 11, 13, 17, 19) for i in range(3, len(dims)): run = build_simple_run({\"ccd\":", "axes=axes) assert model.figure is None figure = FigureSpec((axes,), title=\"\") assert model.figure is figure", "range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run)", "test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims = (5, 7, 11, 13, 17,", "11, 13, 17, 19) for i in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])})", "from ..plot_builders import Images from ..plot_specs import AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure", "..plot_specs import AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure def test_image(): \"Test Images with", "ccd\" assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams == (\"primary\",) 
view.close() def test_figure_set_after_instantiation():", "= Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various accessors\" run", "assert model.figure is None figure = FigureSpec((axes,), title=\"\") assert model.figure is figure view", "\"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c * ccd\",", "model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close()", "dict(model.namespace) == {\"c\": 3} assert model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes =", "various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c * ccd\", namespace={\"c\":", "* ccd\" assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams == (\"primary\",) view.close() def", "model.figure is None figure = FigureSpec((axes,), title=\"\") assert model.figure is figure view =", "build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c * ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure)", "i in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view =", "with higher-dimensional arrays.\" dims = (5, 7, 11, 13, 17, 19) for i", "Images(\"ccd\", axes=axes) assert model.figure is None figure = FigureSpec((axes,), title=\"\") assert model.figure is", "13))}) model = Images(\"c * ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert", "model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various accessors\"", "model = Images(\"c * ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0]", "def 
test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\", axes=axes) assert model.figure is None", "(5, 7, 11, 13, 17, 19) for i in range(3, len(dims)): run =", "arrays.\" dims = (5, 7, 11, 13, 17, 19) for i in range(3,", "= Images(\"c * ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is", "None figure = FigureSpec((axes,), title=\"\") assert model.figure is figure view = HeadlessFigure(model.figure) view.close()", "= HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11,", "for i in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view", "view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\":", "3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert model.field == \"c", "a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view =", "test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\", axes=axes) assert model.figure is None figure", "with a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view", "= HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert model.field == \"c * ccd\"", "Images with higher-dimensional arrays.\" dims = (5, 7, 11, 13, 17, 19) for", "HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with", "build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch", "* 
ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert", "{\"c\": 3} assert model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model", "model.add_run(run) view.close() def test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model", "== \"c * ccd\" assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams == (\"primary\",)", "view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert model.field == \"c *", "assert model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\",", "namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert model.field ==", "\"c * ccd\" assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams == (\"primary\",) view.close()", "..plot_builders import Images from ..plot_specs import AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure def", "= build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c * ccd\", namespace={\"c\": 3}) view =", "run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert not", "is None figure = FigureSpec((axes,), title=\"\") assert model.figure is figure view = HeadlessFigure(model.figure)", "view.close() def test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model =", "import numpy from ..plot_builders import Images from ..plot_specs import AxesSpec, FigureSpec from ...headless.figures", "view.close() def test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims = (5, 7, 11,", "from ...headless.figures import HeadlessFigure def test_image(): \"Test Images with a 2D 
array.\" run", "2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure)", "AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure def test_image(): \"Test Images with a 2D", "assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes", "model.add_run(run) assert model.runs[0] is run assert model.field == \"c * ccd\" assert dict(model.namespace)", "Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various accessors\" run =", "= build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images", "run assert model.field == \"c * ccd\" assert dict(model.namespace) == {\"c\": 3} assert", "assert model.runs[0] is run assert model.field == \"c * ccd\" assert dict(model.namespace) ==", "FigureSpec from ...headless.figures import HeadlessFigure def test_image(): \"Test Images with a 2D array.\"", "numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various", "\"Test Images with higher-dimensional arrays.\" dims = (5, 7, 11, 13, 17, 19)", "test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c *", "numpy.random.random((11, 13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert", "19) for i in range(3, len(dims)): run = build_simple_run({\"ccd\": numpy.random.random(dims[:i])}) model = Images(\"ccd\")", "== (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\", axes=axes) assert", "Images from ..plot_specs import AxesSpec, FigureSpec 
from ...headless.figures import HeadlessFigure def test_image(): \"Test", "= HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images", "Images with a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\")", "numpy from ..plot_builders import Images from ..plot_specs import AxesSpec, FigureSpec from ...headless.figures import", "assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with higher-dimensional", "HeadlessFigure def test_image(): \"Test Images with a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11,", "def test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"c", "model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\", axes=axes)", "model = Images(\"ccd\", axes=axes) assert model.figure is None figure = FigureSpec((axes,), title=\"\") assert", "(\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model = Images(\"ccd\", axes=axes) assert model.figure", "def test_image(): \"Test Images with a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))})", "import build_simple_run import numpy from ..plot_builders import Images from ..plot_specs import AxesSpec, FigureSpec", "Images(\"c * ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run", "= (5, 7, 11, 13, 17, 19) for i in range(3, len(dims)): run", "bluesky_live.run_builder import build_simple_run import numpy from ..plot_builders import Images from ..plot_specs import AxesSpec,", "from bluesky_live.run_builder import build_simple_run import numpy from 
..plot_builders import Images from ..plot_specs import", "model.field == \"c * ccd\" assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams ==", "= Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def", "Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction():", "assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims = (5,", "ccd\", namespace={\"c\": 3}) view = HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert model.field", "model.runs[0] is run assert model.field == \"c * ccd\" assert dict(model.namespace) == {\"c\":", "array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert", "3} assert model.needs_streams == (\"primary\",) view.close() def test_figure_set_after_instantiation(): axes = AxesSpec() model =", "HeadlessFigure(model.figure) model.add_run(run) assert model.runs[0] is run assert model.field == \"c * ccd\" assert", "test_image(): \"Test Images with a 2D array.\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model", "import HeadlessFigure def test_image(): \"Test Images with a 2D array.\" run = build_simple_run({\"ccd\":", "build_simple_run import numpy from ..plot_builders import Images from ..plot_specs import AxesSpec, FigureSpec from", "import AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure def test_image(): \"Test Images with a", "view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test", "len(dims)): run = build_simple_run({\"ccd\": 
numpy.random.random(dims[:i])}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) model.add_run(run) view.close()", "HeadlessFigure(model.figure) model.add_run(run) view.close() def test_properties(): \"Touch various accessors\" run = build_simple_run({\"ccd\": numpy.random.random((11, 13))})", "from ..plot_specs import AxesSpec, FigureSpec from ...headless.figures import HeadlessFigure def test_image(): \"Test Images", "not model.figure.axes[0].images model.add_run(run) assert model.figure.axes[0].images view.close() def test_image_reduction(): \"Test Images with higher-dimensional arrays.\"", "def test_image_reduction(): \"Test Images with higher-dimensional arrays.\" dims = (5, 7, 11, 13,", "...headless.figures import HeadlessFigure def test_image(): \"Test Images with a 2D array.\" run =", "assert model.field == \"c * ccd\" assert dict(model.namespace) == {\"c\": 3} assert model.needs_streams", "build_simple_run({\"ccd\": numpy.random.random((11, 13))}) model = Images(\"ccd\") view = HeadlessFigure(model.figure) assert not model.figure.axes[0].images model.add_run(run)" ]
[ "distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0])", "\"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table =", "as np import matplotlib.pyplot as plt DIMENSION = 2 DATA_SIZE = 10 #", "printTable() plotResult() def readFile(filename): with open(filename, \"r\") as file: print(\"Original input:\") for line", "Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 # Programming Language:", "- 1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename):", "b = pivot[dimen][1] print(\"The coordinate table\") for i in range(len(table)): cood[i][dimen] = (np.power(table[a][i],2)", "ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30)", "range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult()", "file: line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\ table[int(line_array[1])", "indices = list(zip(*np.where(table == max))) print(\"The longest distance pair is {pair}\".format(pair = indices[0]))", "pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult() def readFile(filename): with open(filename, \"r\")", "= list(zip(*np.where(table == max))) print(\"The longest distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot", "{i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult() def readFile(filename):", "print(\"The coordinate table\") for i in range(len(table)): cood[i][dimen] = 
(np.power(table[a][i],2) + np.power(table[a][b],2) -", "def plotResult(): x = cood[:, 0] y = cood[:, 1] fig, ax =", "Map # Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018", "* table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i", "for i in range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 *", "def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i in range(DIMENSION): print(\"\\n\\nThe", "indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0]", "print(\"Original input:\") for line in file: line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1])", "readWords(filename): global WORDS with open(filename) as file: WORDS = file.read().splitlines() def printTable(): for", "# Part 2: Fast Map # Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME>", "for i in range(0, DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2)", "range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x},", "in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x", "y = cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x,", "DIMENSION = 2 DATA_SIZE = 10 # WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"]", "print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b", 
"file.read().splitlines() def printTable(): for row in table: print(row) def pickLongestPair(): max = np.amax(table)", "axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for", "1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2]) def", "is {pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen):", "print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult() def readFile(filename): with open(filename, \"r\") as file:", "pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The longest distance pair", "table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename): global WORDS with open(filename)", "j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult():", "Python 3.6 # import numpy as np import matplotlib.pyplot as plt DIMENSION =", "# import numpy as np import matplotlib.pyplot as plt DIMENSION = 2 DATA_SIZE", "def printTable(): for row in table: print(row) def pickLongestPair(): max = np.amax(table) indices", "= cood[:, 0] y = cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X axis')", "updateTable(dimen): for i in range(0, DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j] =", "= cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y)", "2: Fast Map # Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) #", "0] y = cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis')", "# WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\"", "matplotlib.pyplot as plt 
DIMENSION = 2 DATA_SIZE = 10 # WORDS = [\"acting\",", "3 # Part 2: Fast Map # Group Members: <NAME> (zhan198), <NAME> (minyihua),", "\"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE,", "table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main():", "with open(filename) as file: WORDS = file.read().splitlines() def printTable(): for row in table:", "= file.read().splitlines() def printTable(): for row in table: print(row) def pickLongestPair(): max =", "= [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name =", "= np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main(): readFile(data_file_name)", "= [] def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i in", "\"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table", "Language: Python 3.6 # import numpy as np import matplotlib.pyplot as plt DIMENSION", "(zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 # Programming Language: Python 3.6", "fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\",", "\"r\") as file: print(\"Original input:\") for line in file: line_array = line.split() print(line_array)", "{piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1] print(\"The", "DIMENSION)) pivot = [] def main(): 
readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for", "= line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) - 1][int(line_array[0])", "= np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS)", "WORDS = file.read().splitlines() def printTable(): for row in table: print(row) def pickLongestPair(): max", "ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for i,", "file: print(\"Original input:\") for line in file: line_array = line.split() print(line_array) table[int(line_array[0]) -", "= \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION))", "range(0, DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] -", "= float(line_array[2]) def readWords(filename): global WORDS with open(filename) as file: WORDS = file.read().splitlines()", "ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for i, txt in", "s=30) plt.title(\"Fast Map Result\") for i, txt in enumerate(WORDS): ax.annotate(txt, (x[i], y[i])) plt.show()", "for j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def", "cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i,", "= (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x=", "<NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 # Programming Language: Python", "table\") for i in range(len(table)): 
cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2", "y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for i, txt in enumerate(WORDS):", "np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main(): readFile(data_file_name) print(\"\\nOriginal", "def readWords(filename): global WORDS with open(filename) as file: WORDS = file.read().splitlines() def printTable():", "= 10 # WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name", "readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood:", "ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\")", "[\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt'", "\\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename): global WORDS with", "np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3)))", "updateTable(i) printTable() plotResult() def readFile(filename): with open(filename, \"r\") as file: print(\"Original input:\") for", "# Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 #", "DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main(): readFile(data_file_name) print(\"\\nOriginal table:\")", "indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\")", "- 
1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename): global WORDS with open(filename) as", "\"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot", "piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1]", "Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 # Programming", "line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) -", "= np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x = cood[:, 0] y", "- cood[j][dimen]),2)) def plotResult(): x = cood[:, 0] y = cood[:, 1] fig,", "table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in", "- 1] = float(line_array[2]) def readWords(filename): global WORDS with open(filename) as file: WORDS", "== max))) print(\"The longest distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot is piont", "# INF 552 Homework 3 # Part 2: Fast Map # Group Members:", "global WORDS with open(filename) as file: WORDS = file.read().splitlines() def printTable(): for row", "plt.title(\"Fast Map Result\") for i, txt in enumerate(WORDS): ax.annotate(txt, (x[i], y[i])) plt.show() main()", "printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table:", "in range(0, DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen]", "in table: print(row) def pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table == max)))", "print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate 
table: \") updateTable(i) printTable() plotResult() def", "def calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\") for i", "- 1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2])", "# Date: 2/27/2018 # Programming Language: Python 3.6 # import numpy as np", "line in file: line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] =", "in range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print", "(np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0],", "print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in range(0,", "print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1))", "plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map", "np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The longest distance pair is {pair}\".format(pair =", "in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable()", "def updateTable(dimen): for i in range(0, DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j]", "2 DATA_SIZE = 10 # WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS =", "Fast Map # Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac) # Date:", "readFile(filename): with open(filename, \"r\") as file: print(\"Original input:\") for line in file: line_array", "plt 
DIMENSION = 2 DATA_SIZE = 10 # WORDS = [\"acting\", \"activist\", \"compute\",", "- np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x = cood[:, 0] y = cood[:,", "longest distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0]))", "Homework 3 # Part 2: Fast Map # Group Members: <NAME> (zhan198), <NAME>", "calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult() def readFile(filename): with open(filename, \"r\") as", "(2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for", "table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] =", "\".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult() def readFile(filename): with open(filename,", "np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable()", "= pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\") for i in range(len(table)): cood[i][dimen]", "coordinate table\") for i in range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/", "table: \") updateTable(i) printTable() plotResult() def readFile(filename): with open(filename, \"r\") as file: print(\"Original", "axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for i, txt", "Date: 2/27/2018 # Programming Language: Python 3.6 # import numpy as np import", "list(zip(*np.where(table == max))) print(\"The longest distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot is", "(jeffyjac) # Date: 2/27/2018 # Programming Language: Python 3.6 # import numpy as", "np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, 
{y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen):", "DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x = cood[:,", "3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in range(0, DATA_SIZE): for j in range(0,", "DATA_SIZE = 10 # WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = []", "10 # WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name =", "print(WORDS) printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate", "= [] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood", "[] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood =", "float(line_array[2]) def readWords(filename): global WORDS with open(filename) as file: WORDS = file.read().splitlines() def", "Programming Language: Python 3.6 # import numpy as np import matplotlib.pyplot as plt", "round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in range(0, DATA_SIZE): for j in", "for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \")", "as file: print(\"Original input:\") for line in file: line_array = line.split() print(line_array) table[int(line_array[0])", "main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st", "i in range(0, DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) 
-", "pivot[dimen][1] print(\"The coordinate table\") for i in range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2)", "in file: line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\", "line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) -", "print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1]", "max))) print(\"The longest distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv", "print(\"The longest distance pair is {pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv =", "3.6 # import numpy as np import matplotlib.pyplot as plt DIMENSION = 2", "= indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a =", "plotResult(): x = cood[:, 0] y = cood[:, 1] fig, ax = plt.subplots()", "cood[j][dimen]),2)) def plotResult(): x = cood[:, 0] y = cood[:, 1] fig, ax", "numpy as np import matplotlib.pyplot as plt DIMENSION = 2 DATA_SIZE = 10", "(\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in range(0, DATA_SIZE):", "[] def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i in range(DIMENSION):", "1] fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y,", "pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\") for i in range(len(table)): cood[i][dimen] =", "= plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast", "x = cood[:, 0] y = cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X", "color=\"red\", s=30) 
plt.title(\"Fast Map Result\") for i, txt in enumerate(WORDS): ax.annotate(txt, (x[i], y[i]))", "WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name", "WORDS = [] data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE))", "import numpy as np import matplotlib.pyplot as plt DIMENSION = 2 DATA_SIZE =", "pivot = [] def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name) print(WORDS) printTable() for i", "1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename): global WORDS with open(filename) as file:", "open(filename, \"r\") as file: print(\"Original input:\") for line in file: line_array = line.split()", "i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i)", "= \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename): global WORDS", "table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x = cood[:, 0]", "table: print(row) def pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The", "# Programming Language: Python 3.6 # import numpy as np import matplotlib.pyplot as", "input:\") for line in file: line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) -", "range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x =", "plotResult() def readFile(filename): with open(filename, \"r\") as file: print(\"Original input:\") for line in", "WORDS with open(filename) as file: WORDS = file.read().splitlines() def printTable(): for row in", "x= 
round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in range(0, DATA_SIZE): for j", "<NAME> (minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 # Programming Language: Python 3.6 #", "for row in table: print(row) def pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table", "def pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The longest distance", "(minyihua), <NAME> (jeffyjac) # Date: 2/27/2018 # Programming Language: Python 3.6 # import", "np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x = cood[:, 0] y = cood[:, 1]", "cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i) print(\"\\nUpdate table: \") updateTable(i) printTable() plotResult() def readFile(filename): with", "= 2 DATA_SIZE = 10 # WORDS = [\"acting\", \"activist\", \"compute\", \"coward\",\"forward\",\"interaction\",\"activity\",\"odor\",\"order\",\"international\"] WORDS", "cood[:, 0] y = cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y", "+ np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1],", "as file: WORDS = file.read().splitlines() def printTable(): for row in table: print(row) def", "words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot =", "plt.scatter(x, y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for i, txt in enumerate(WORDS): ax.annotate(txt,", "- np.power(table[i][b],2))/ (2 * table[a][b]) print (\"{i}\\t({x}, {y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def", "# # INF 552 Homework 3 # Part 2: Fast Map # Group", "readWords(words_file_name) print(WORDS) printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair() calculateCoordinate(i)", "table:\") readWords(words_file_name) print(WORDS) 
printTable() for i in range(DIMENSION): print(\"\\n\\nThe {i}st cood: \".format(i=i+1)) pickLongestPair()", "Part 2: Fast Map # Group Members: <NAME> (zhan198), <NAME> (minyihua), <NAME> (jeffyjac)", "open(filename) as file: WORDS = file.read().splitlines() def printTable(): for row in table: print(row)", "row in table: print(row) def pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table ==", "<NAME> (jeffyjac) # Date: 2/27/2018 # Programming Language: Python 3.6 # import numpy", "1] = float(line_array[2]) def readWords(filename): global WORDS with open(filename) as file: WORDS =", "pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\") for", "DATA_SIZE): for j in range(0, DATA_SIZE): table[i][j] = np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2))", "printTable(): for row in table: print(row) def pickLongestPair(): max = np.amax(table) indices =", "calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\") for i in", "def readFile(filename): with open(filename, \"r\") as file: print(\"Original input:\") for line in file:", "as plt DIMENSION = 2 DATA_SIZE = 10 # WORDS = [\"acting\", \"activist\",", "a = pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate table\") for i in range(len(table)):", "= np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The longest distance pair is {pair}\".format(pair", "3))) def updateTable(dimen): for i in range(0, DATA_SIZE): for j in range(0, DATA_SIZE):", "1] = \\ table[int(line_array[1]) - 1][int(line_array[0]) - 1] = float(line_array[2]) def readWords(filename): global", "= indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b = pivot[dimen][1] print(\"The coordinate", "print(row) def pickLongestPair(): max = np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The longest", "with 
open(filename, \"r\") as file: print(\"Original input:\") for line in file: line_array =", "for line in file: line_array = line.split() print(line_array) table[int(line_array[0]) - 1][int(line_array[1]) - 1]", "np.sqrt(np.power(table[i][j],2) - np.power((cood[i][dimen] - cood[j][dimen]),2)) def plotResult(): x = cood[:, 0] y =", "i in range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) + np.power(table[a][b],2) - np.power(table[i][b],2))/ (2 * table[a][b])", "pair is {pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def", "file: WORDS = file.read().splitlines() def printTable(): for row in table: print(row) def pickLongestPair():", "cood[:, 1] fig, ax = plt.subplots() ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.scatter(x, y) plt.scatter(x,", "\") updateTable(i) printTable() plotResult() def readFile(filename): with open(filename, \"r\") as file: print(\"Original input:\")", "552 Homework 3 # Part 2: Fast Map # Group Members: <NAME> (zhan198),", "{y})\".format(i=i, x= round(cood[i][0], 3),y=round(cood[i][1], 3))) def updateTable(dimen): for i in range(0, DATA_SIZE): for", "data_file_name = \"fastmap-data.txt\" words_file_name = 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE,", "cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def main(): readFile(data_file_name) print(\"\\nOriginal table:\") readWords(words_file_name)", "= 'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = []", "y, color=\"red\", s=30) plt.title(\"Fast Map Result\") for i, txt in enumerate(WORDS): ax.annotate(txt, (x[i],", "is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a = pivot[dimen][0] b =", "INF 552 Homework 3 # Part 2: Fast Map # Group Members: <NAME>", "= pivot[dimen][1] print(\"The coordinate table\") for i in 
range(len(table)): cood[i][dimen] = (np.power(table[a][i],2) +", "max = np.amax(table) indices = list(zip(*np.where(table == max))) print(\"The longest distance pair is", "'fastmap-wordlist.txt' table = np.zeros(shape=(DATA_SIZE, DATA_SIZE)) cood = np.zeros(shape=(DATA_SIZE, DIMENSION)) pivot = [] def", "import matplotlib.pyplot as plt DIMENSION = 2 DATA_SIZE = 10 # WORDS =", "2/27/2018 # Programming Language: Python 3.6 # import numpy as np import matplotlib.pyplot", "{pair}\".format(pair = indices[0])) print(\"Pivot is piont {piv}\".format(piv = indices[0][0])) pivot.append(indices[0]) def calculateCoordinate(dimen): a", "np import matplotlib.pyplot as plt DIMENSION = 2 DATA_SIZE = 10 # WORDS" ]
[ "wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width = 16 # wav is 16 bits", "= wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width = 16 # wav is 16", "16 bits radix = \"HEX\" # address and data in hex print(\"DEPTH =\",", "str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for", "print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue =", "and data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\")", "% i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" % (frameValue[1], frameValue[0]) print(frameAddr, \":\",", "input\") waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width = 16 # wav", "< 2: print(\"Missing args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes()", "\"HEX\" # address and data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\")", "hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\")", "print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue = waveReader.readframes(1)", "i in range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue = waveReader.readframes(1) hexFrame =", "print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width = 16 #", "radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue", "# wav is 16 bits radix = \"HEX\" # address and data in", "radix = \"HEX\" # address and data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH", "import wave, sys if len(sys.argv) < 2: 
print(\"Missing args.\") print(\"Expected: input\") waveReader =", "str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()):", "i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" % (frameValue[1], frameValue[0]) print(frameAddr, \":\", hexFrame+\";\")", "print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i", "print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr", "=\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\")", "print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\")", "= \"%X\" % i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" % (frameValue[1], frameValue[0])", "\"%X\" % i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" % (frameValue[1], frameValue[0]) print(frameAddr,", "args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width = 16", "address and data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\",", "16 # wav is 16 bits radix = \"HEX\" # address and data", "radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr = \"%X\"", "in range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\"", "data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX 
=\", radix+\";\") print(\"DATA_RADIX", "#!/usr/bin/python import wave, sys if len(sys.argv) < 2: print(\"Missing args.\") print(\"Expected: input\") waveReader", "# address and data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX", "frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" % (frameValue[1], frameValue[0]) print(frameAddr, \":\", hexFrame+\";\") print(\"\\nEND;\")", "= \"HEX\" # address and data in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\",", "width = 16 # wav is 16 bits radix = \"HEX\" # address", "range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" %", "if len(sys.argv) < 2: print(\"Missing args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb') depth", "waveReader.getnframes() width = 16 # wav is 16 bits radix = \"HEX\" #", "depth = waveReader.getnframes() width = 16 # wav is 16 bits radix =", "wav is 16 bits radix = \"HEX\" # address and data in hex", "=\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in", "print(\"Missing args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width =", "2: print(\"Missing args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width", "print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr = \"%X\" %", "= 16 # wav is 16 bits radix = \"HEX\" # address and", "bits radix = \"HEX\" # address and data in hex print(\"DEPTH =\", str(depth)+\";\")", "<filename>waves/WAVtoMIF.py<gh_stars>1-10 #!/usr/bin/python import wave, sys if len(sys.argv) < 2: print(\"Missing args.\") print(\"Expected: input\")", "len(sys.argv) < 2: print(\"Missing args.\") print(\"Expected: input\") waveReader = 
wave.open(sys.argv[1], 'rb') depth =", "is 16 bits radix = \"HEX\" # address and data in hex print(\"DEPTH", "wave, sys if len(sys.argv) < 2: print(\"Missing args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1],", "in hex print(\"DEPTH =\", str(depth)+\";\") print(\"WIDTH =\", str(width)+\";\") print(\"ADDRESS_RADIX =\", radix+\";\") print(\"DATA_RADIX =\",", "=\", radix+\";\") print(\"DATA_RADIX =\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr =", "frameAddr = \"%X\" % i frameValue = waveReader.readframes(1) hexFrame = \"%02X%02X\" % (frameValue[1],", "=\", radix+\";\") print(\"CONTENT\") print(\"BEGIN\\n\") for i in range(waveReader.getnframes()): frameAddr = \"%X\" % i", "sys if len(sys.argv) < 2: print(\"Missing args.\") print(\"Expected: input\") waveReader = wave.open(sys.argv[1], 'rb')", "'rb') depth = waveReader.getnframes() width = 16 # wav is 16 bits radix", "= waveReader.getnframes() width = 16 # wav is 16 bits radix = \"HEX\"", "for i in range(waveReader.getnframes()): frameAddr = \"%X\" % i frameValue = waveReader.readframes(1) hexFrame", "waveReader = wave.open(sys.argv[1], 'rb') depth = waveReader.getnframes() width = 16 # wav is" ]
[ "https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m, n): \"\"\" :type m: int :type", "\"\"\" A robot is located at the top-left corner of a m x", "num *= i for i in range(m - 1, 0, -1): den *=", "below). The robot can only move either down or right at any point", "utf-8 -*- # Authors: <NAME> <<EMAIL>> \"\"\" A robot is located at the", "How many possible unique paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self,", "2, n - 1, -1): num *= i for i in range(m -", "= 1 den = 1 for i in range(m + n - 2,", "i in range(m + n - 2, n - 1, -1): num *=", "- 2, n - 1, -1): num *= i for i in range(m", "possible unique paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m, n):", "only move either down or right at any point in time. The robot", "many possible unique paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m,", "1 for i in range(m + n - 2, n - 1, -1):", "in the diagram below). How many possible unique paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\"", "the top-left corner of a m x n grid (marked 'Start' in the", "int :type n: int :rtype: int \"\"\" # C^m_m+n num = 1 den", "in range(m + n - 2, n - 1, -1): num *= i", ":type m: int :type n: int :rtype: int \"\"\" # C^m_m+n num =", "a m x n grid (marked 'Start' in the diagram below). The robot", "right at any point in time. The robot is trying to reach the", "uniquePaths(self, m, n): \"\"\" :type m: int :type n: int :rtype: int \"\"\"", "\"\"\" :type m: int :type n: int :rtype: int \"\"\" # C^m_m+n num", "there? 
https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m, n): \"\"\" :type m: int", "range(m + n - 2, n - 1, -1): num *= i for", "the bottom-right corner of the grid (marked 'Finish' in the diagram below). How", "The robot is trying to reach the bottom-right corner of the grid (marked", "reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).", "the diagram below). The robot can only move either down or right at", "point in time. The robot is trying to reach the bottom-right corner of", "down or right at any point in time. The robot is trying to", "for i in range(m + n - 2, n - 1, -1): num", "\"\"\" class Solution(object): def uniquePaths(self, m, n): \"\"\" :type m: int :type n:", "n: int :rtype: int \"\"\" # C^m_m+n num = 1 den = 1", "# -*- coding: utf-8 -*- # Authors: <NAME> <<EMAIL>> \"\"\" A robot is", "A robot is located at the top-left corner of a m x n", "corner of the grid (marked 'Finish' in the diagram below). How many possible", "m, n): \"\"\" :type m: int :type n: int :rtype: int \"\"\" #", "corner of a m x n grid (marked 'Start' in the diagram below).", "any point in time. The robot is trying to reach the bottom-right corner", "robot is trying to reach the bottom-right corner of the grid (marked 'Finish'", "'Finish' in the diagram below). How many possible unique paths are there? https://leetcode.com/problems/unique-paths/description/", "m x n grid (marked 'Start' in the diagram below). The robot can", "or right at any point in time. The robot is trying to reach", "int \"\"\" # C^m_m+n num = 1 den = 1 for i in", "The robot can only move either down or right at any point in", "1 den = 1 for i in range(m + n - 2, n", "+ n - 2, n - 1, -1): num *= i for i", "robot is located at the top-left corner of a m x n grid", "in range(m - 1, 0, -1): den *= i return num / den", "in the diagram below). 
The robot can only move either down or right", "grid (marked 'Start' in the diagram below). The robot can only move either", "int :rtype: int \"\"\" # C^m_m+n num = 1 den = 1 for", "coding: utf-8 -*- # Authors: <NAME> <<EMAIL>> \"\"\" A robot is located at", "is trying to reach the bottom-right corner of the grid (marked 'Finish' in", "<NAME> <<EMAIL>> \"\"\" A robot is located at the top-left corner of a", "top-left corner of a m x n grid (marked 'Start' in the diagram", "-*- coding: utf-8 -*- # Authors: <NAME> <<EMAIL>> \"\"\" A robot is located", "n - 2, n - 1, -1): num *= i for i in", "# Authors: <NAME> <<EMAIL>> \"\"\" A robot is located at the top-left corner", "located at the top-left corner of a m x n grid (marked 'Start'", "either down or right at any point in time. The robot is trying", "at the top-left corner of a m x n grid (marked 'Start' in", "C^m_m+n num = 1 den = 1 for i in range(m + n", "- 1, -1): num *= i for i in range(m - 1, 0,", "class Solution(object): def uniquePaths(self, m, n): \"\"\" :type m: int :type n: int", "to reach the bottom-right corner of the grid (marked 'Finish' in the diagram", "trying to reach the bottom-right corner of the grid (marked 'Finish' in the", "n): \"\"\" :type m: int :type n: int :rtype: int \"\"\" # C^m_m+n", "the diagram below). How many possible unique paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class", "are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m, n): \"\"\" :type m:", "= 1 for i in range(m + n - 2, n - 1,", "n - 1, -1): num *= i for i in range(m - 1,", "(marked 'Finish' in the diagram below). How many possible unique paths are there?", "unique paths are there? 
https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m, n): \"\"\"", "def uniquePaths(self, m, n): \"\"\" :type m: int :type n: int :rtype: int", "for i in range(m - 1, 0, -1): den *= i return num", "'Start' in the diagram below). The robot can only move either down or", "i in range(m - 1, 0, -1): den *= i return num /", "1, -1): num *= i for i in range(m - 1, 0, -1):", "<<EMAIL>> \"\"\" A robot is located at the top-left corner of a m", "Authors: <NAME> <<EMAIL>> \"\"\" A robot is located at the top-left corner of", "*= i for i in range(m - 1, 0, -1): den *= i", "Solution(object): def uniquePaths(self, m, n): \"\"\" :type m: int :type n: int :rtype:", "(marked 'Start' in the diagram below). The robot can only move either down", "of a m x n grid (marked 'Start' in the diagram below). The", "num = 1 den = 1 for i in range(m + n -", "time. The robot is trying to reach the bottom-right corner of the grid", "the grid (marked 'Finish' in the diagram below). How many possible unique paths", "move either down or right at any point in time. The robot is", "m: int :type n: int :rtype: int \"\"\" # C^m_m+n num = 1", "paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def uniquePaths(self, m, n): \"\"\" :type", "\"\"\" # C^m_m+n num = 1 den = 1 for i in range(m", "below). How many possible unique paths are there? https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object): def", "at any point in time. The robot is trying to reach the bottom-right", "-1): num *= i for i in range(m - 1, 0, -1): den", "is located at the top-left corner of a m x n grid (marked", "# C^m_m+n num = 1 den = 1 for i in range(m +", ":type n: int :rtype: int \"\"\" # C^m_m+n num = 1 den =", "den = 1 for i in range(m + n - 2, n -", "i for i in range(m - 1, 0, -1): den *= i return", "diagram below). How many possible unique paths are there? 
https://leetcode.com/problems/unique-paths/description/ \"\"\" class Solution(object):", "bottom-right corner of the grid (marked 'Finish' in the diagram below). How many", "x n grid (marked 'Start' in the diagram below). The robot can only", ":rtype: int \"\"\" # C^m_m+n num = 1 den = 1 for i", "can only move either down or right at any point in time. The", "n grid (marked 'Start' in the diagram below). The robot can only move", "diagram below). The robot can only move either down or right at any", "<filename>algorithms/python/leetcode/UniquePaths.py # -*- coding: utf-8 -*- # Authors: <NAME> <<EMAIL>> \"\"\" A robot", "of the grid (marked 'Finish' in the diagram below). How many possible unique", "-*- # Authors: <NAME> <<EMAIL>> \"\"\" A robot is located at the top-left", "grid (marked 'Finish' in the diagram below). How many possible unique paths are", "robot can only move either down or right at any point in time.", "in time. The robot is trying to reach the bottom-right corner of the" ]
[ "numpy from sympy import Symbol, lambdify, sin, pprint a = numpy.arange(10) x =", "from sympy import Symbol, lambdify, sin, pprint a = numpy.arange(10) x = Symbol('x')", "Symbol, lambdify, sin, pprint a = numpy.arange(10) x = Symbol('x') expr = sin(x)", "sympy import Symbol, lambdify, sin, pprint a = numpy.arange(10) x = Symbol('x') expr", "<reponame>janbodnar/Python-Course #!/usr/bin/python import numpy from sympy import Symbol, lambdify, sin, pprint a =", "lambdify, sin, pprint a = numpy.arange(10) x = Symbol('x') expr = sin(x) f", "a = numpy.arange(10) x = Symbol('x') expr = sin(x) f = lambdify(x, expr,", "pprint a = numpy.arange(10) x = Symbol('x') expr = sin(x) f = lambdify(x,", "import numpy from sympy import Symbol, lambdify, sin, pprint a = numpy.arange(10) x", "sin, pprint a = numpy.arange(10) x = Symbol('x') expr = sin(x) f =", "numpy.arange(10) x = Symbol('x') expr = sin(x) f = lambdify(x, expr, \"numpy\") pprint(f(a))", "#!/usr/bin/python import numpy from sympy import Symbol, lambdify, sin, pprint a = numpy.arange(10)", "import Symbol, lambdify, sin, pprint a = numpy.arange(10) x = Symbol('x') expr =", "= numpy.arange(10) x = Symbol('x') expr = sin(x) f = lambdify(x, expr, \"numpy\")" ]
[ "os import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__': path='../../../data/datasets'", "getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s : %s (%d)' % (ii, tbn,", "import getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' %", "import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__': path='../../../data/datasets' for", "#!/usr/bin/python # -*- coding: utf-8 -*- __author__ = 'ar' import os import glob", "'ar' import os import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ ==", "utf-8 -*- __author__ = 'ar' import os import glob from app.backend.core.utils import getDirectorySizeInBytes,", "= getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s : %s (%d)' % (ii,", "tsizeHuman = humanReadableSize(tsize) print ('[%d] %s : %s (%d)' % (ii, tbn, tsizeHuman,", "getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)):", "enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s", "tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s : %s (%d)' %", "path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s : %s", "path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman =", "-*- __author__ = 'ar' import os import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize", "app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ 
== '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*'", "if __name__ == '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize", "from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__': path='../../../data/datasets' for ii,pp in", "humanReadableSize if __name__ == '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp)", "= 'ar' import os import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__", "__name__ == '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize =", "for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize)", "# -*- coding: utf-8 -*- __author__ = 'ar' import os import glob from", "% path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s :", "'__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman", "__author__ = 'ar' import os import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if", "coding: utf-8 -*- __author__ = 'ar' import os import glob from app.backend.core.utils import", "import os import glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__':", "in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d]", "glob from app.backend.core.utils import getDirectorySizeInBytes, humanReadableSize if __name__ == '__main__': path='../../../data/datasets' for ii,pp", "ii,pp in 
enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print", "== '__main__': path='../../../data/datasets' for ii,pp in enumerate(glob.glob('%s/*' % path)): tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp)", "tbn=os.path.basename(pp) tsize = getDirectorySizeInBytes(pp) tsizeHuman = humanReadableSize(tsize) print ('[%d] %s : %s (%d)'", "= humanReadableSize(tsize) print ('[%d] %s : %s (%d)' % (ii, tbn, tsizeHuman, tsize))", "-*- coding: utf-8 -*- __author__ = 'ar' import os import glob from app.backend.core.utils" ]
[ "# import pytest import subprocess from ._subprocess import run_cli_tool def _unittest_trivial() -> None:", "_unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with", "with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with", "'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large timeout", "# Author: <NAME> <<EMAIL>> # import pytest import subprocess from ._subprocess import run_cli_tool", "Development Team # This software is distributed under the terms of the MIT", "under the terms of the MIT License. # Author: <NAME> <<EMAIL>> # import", "pytest import subprocess from ._subprocess import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0)", "def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0)", "distributed under the terms of the MIT License. 
# Author: <NAME> <<EMAIL>> #", "subprocess from ._subprocess import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError):", "timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0)", "timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large timeout run_cli_tool('pub',", "-> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError):", "from ._subprocess import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0)", "is distributed under the terms of the MIT License. 
# Author: <NAME> <<EMAIL>>", "run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command',", "._subprocess import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with", "import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError):", "run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large", "# Look-up of a nonexistent package requires large timeout run_cli_tool('pub', 'nonexistent.data.Type.1.0', '{}', '--tr=Loopback(None)',", "software is distributed under the terms of the MIT License. # Author: <NAME>", "License. # Author: <NAME> <<EMAIL>> # import pytest import subprocess from ._subprocess import", "<<EMAIL>> # import pytest import subprocess from ._subprocess import run_cli_tool def _unittest_trivial() ->", "Copyright (c) 2019 UAVCAN Development Team # This software is distributed under the", "import pytest import subprocess from ._subprocess import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport',", "2019 UAVCAN Development Team # This software is distributed under the terms of", "import subprocess from ._subprocess import run_cli_tool def _unittest_trivial() -> None: run_cli_tool('show-transport', timeout=2.0) with", "# This software is distributed under the terms of the MIT License. 
#", "with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up", "None: run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg',", "pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large timeout run_cli_tool('pub', 'nonexistent.data.Type.1.0', '{}',", "pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of", "UAVCAN Development Team # This software is distributed under the terms of the", "run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a", "terms of the MIT License. 
# Author: <NAME> <<EMAIL>> # import pytest import", "pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires", "<NAME> <<EMAIL>> # import pytest import subprocess from ._subprocess import run_cli_tool def _unittest_trivial()", "Look-up of a nonexistent package requires large timeout run_cli_tool('pub', 'nonexistent.data.Type.1.0', '{}', '--tr=Loopback(None)', timeout=5.0)", "timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent", "with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package", "the MIT License. # Author: <NAME> <<EMAIL>> # import pytest import subprocess from", "# # Copyright (c) 2019 UAVCAN Development Team # This software is distributed", "Author: <NAME> <<EMAIL>> # import pytest import subprocess from ._subprocess import run_cli_tool def", "MIT License. # Author: <NAME> <<EMAIL>> # import pytest import subprocess from ._subprocess", "# Copyright (c) 2019 UAVCAN Development Team # This software is distributed under", "with pytest.raises(subprocess.CalledProcessError): # Look-up of a nonexistent package requires large timeout run_cli_tool('pub', 'nonexistent.data.Type.1.0',", "pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError):", "of the MIT License. 
# Author: <NAME> <<EMAIL>> # import pytest import subprocess", "(c) 2019 UAVCAN Development Team # This software is distributed under the terms", "This software is distributed under the terms of the MIT License. # Author:", "run_cli_tool('show-transport', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path',", "run_cli_tool(timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('invalid-command', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): run_cli_tool('dsdl-gen-pkg', 'nonexistent/path', timeout=2.0) with pytest.raises(subprocess.CalledProcessError): #", "Team # This software is distributed under the terms of the MIT License.", "the terms of the MIT License. # Author: <NAME> <<EMAIL>> # import pytest" ]
[ "< self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value", "# Predict next actions with target policy network next_action = self.target_policy_net(next_state) # Predict", "-policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data *", "gamma, weights, inds = batch state = np.asarray(state) action = np.asarray(action) reward =", "self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) #", "# Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay:", "critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities in buffer", "+ param.data * self.tau ) # Send updated learner to the queue if", "as nn import torch.optim as optim import queue from insomnia.utils import empty_torch_queue from", "if self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss *", "self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data)", "Z distribution with target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected", "weights, inds = batch state = np.asarray(state) action = np.asarray(action) reward = np.asarray(reward)", "0.998 self.prioritized_replay = 0 self.learner_w_queue = 
learner_w_queue self.delta_z = (self.v_max - self.v_min) /", "(1.0 - self.tau) + param.data * self.tau ) for target_param, param in zip(self.target_policy_net.parameters(),", "# Send updated learner to the queue if update_step.value % 100 == 0:", "self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max -", "n_atoms self.num_train_steps = 10000 self.batch_size = 256 self.tau = 0.001 self.gamma = 0.998", "input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param,", "try: params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self,", "+= 1 if update_step.value % 1000 == 0: print(\"Training step \", update_step.value) training_on.value", "self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value +=", "queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if update_step.value % 1000 ==", "for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) +", "value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor ----------- policy_loss =", "= -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data", "gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = 
self.value_net.get_probs(state,", "param.data * self.tau ) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data *", "optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue,", "done, gamma, weights, inds = batch state = np.asarray(state) action = np.asarray(action) reward", "except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if update_step.value % 1000", "= torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done =", "updated learner to the queue if update_step.value % 100 == 0: try: params", "100 == 0: try: params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except:", "self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims,", "distribution with target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution", "self.batch_size = 256 self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue", "= 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss =", "numpy as np import torch import torch.nn as nn import torch.optim as optim", "update_step.value % 100 == 0: try: params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()]", "self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, 
self.v_max, self.num_atoms) self.policy_net =", "policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad()", "= optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch,", "% 100 == 0: try: params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params)", "= np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action =", "self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps = 10000 self.batch_size = 256 self.tau", "0.001 self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max", "target_param.data * (1.0 - self.tau) + param.data * self.tau ) # Send updated", "for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer =", "= policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward()", "self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device)", "self.v_min) / (self.num_atoms - 1) self.device = 'cuda' if torch.cuda.is_available() else 'cpu' #", "self.num_train_steps = 10000 self.batch_size = 256 self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay", "= 
self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1)", "------- Update critic ------- # Predict next actions with target policy network next_action", "0 self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1)", "action, reward, next_state, done, gamma, weights, inds = batch state = np.asarray(state) action", "import l2_projection class LearnerD4PG(object): \"\"\"Policy and value network update routine. \"\"\" def __init__(self,", "self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma **", "n_atoms=51): self.v_min = v_min self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps = 10000", "self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self,", "param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha)", "update_step): update_time = time.time() state, action, reward, next_state, done, gamma, weights, inds =", "self.target_policy_net(next_state) # Predict Z distribution with target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach())", "network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward,", "policy_loss = torch.sum(policy_loss, dim=1) policy_loss = 
-policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param", "with target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected", "self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions))", "= torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected)", "self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss =", "from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and value", "pass def run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try: batch", "self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta)", "target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done,", "import torch import torch.nn as nn import torch.optim as optim import queue from", "= v_max self.num_atoms = n_atoms self.num_train_steps = 10000 self.batch_size = 256 self.tau =", "name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, 
fc2_dims,", "self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min,", "torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- #", "= torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- # Predict next", "value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward()", "next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) #", "import time import numpy as np import torch import torch.nn as nn import", "action = np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done) weights", "import d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and value network update", "self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten()", "zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau )", "update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step)", "------- # Predict next actions with target policy network next_action = self.target_policy_net(next_state) #", "time.time() state, action, reward, next_state, done, gamma, weights, inds = batch state =", 
"inds = batch state = np.asarray(state) action = np.asarray(action) reward = np.asarray(reward) next_state", "Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min,", "update_step): while update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch,", "policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step()", "policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 -", "input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net =", "np.asarray(state) action = np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done)", "= n_atoms self.num_train_steps = 10000 self.batch_size = 256 self.tau = 0.001 self.gamma =", "next_action = self.target_policy_net(next_state) # Predict Z distribution with target value network target_value =", "if update_step.value % 1000 == 0: print(\"Training step \", update_step.value) training_on.value = 0", "fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta,", "self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue self.delta_z", "queue if update_step.value % 100 == 0: try: params = [p.data.cpu().detach().numpy() for p", "with target policy network next_action = self.target_policy_net(next_state) # Predict Z distribution with 
target", "= value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step()", "np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten()", "next actions with target policy network next_action = self.target_policy_net(next_state) # Predict Z distribution", "in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error)", "= 0 self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max - self.v_min) / (self.num_atoms -", "np import torch import torch.nn as nn import torch.optim as optim import queue", "target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau ) for target_param,", "param.data * self.tau ) # Send updated learner to the queue if update_step.value", "torch.nn as nn import torch.optim as optim import queue from insomnia.utils import empty_torch_queue", "np.asarray(next_state) done = np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device)", "lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time = time.time() state,", "torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param,", "value_loss.mean(axis=1) # Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if", "and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max,", "= np.asarray(next_state) done = 
np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten() state =", "= self.target_policy_net(next_state) # Predict Z distribution with target value network target_value = self.target_value_net.get_probs(next_state,", "input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max =", "for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue, replay_priority_queue, update_step):", "target_param.data * (1.0 - self.tau) + param.data * self.tau ) for target_param, param", "replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty: continue", "buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) +", "= np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done) weights = np.asarray(weights) inds =", "params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on,", "learner_w_queue self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1) self.device = 'cuda'", "def run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try: batch =", "+ priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss", "self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean()", "Predict next actions with target policy network next_action = self.target_policy_net(next_state) # Predict Z", "torch.from_numpy(state).float().to(self.device) next_state = 
torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device)", "in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau", "as np import torch import torch.nn as nn import torch.optim as optim import", "replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean()", "- self.tau) + param.data * self.tau ) # Send updated learner to the", "self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,", "Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor -----------", "* (1.0 - self.tau) + param.data * self.tau ) # Send updated learner", "target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(),", "target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer", "(1.0 - self.tau) + param.data * self.tau ) # Send updated learner to", "(self.num_atoms - 1) self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # Noise process", "self.target_policy_net = target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for 
target_param, param", "inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward", "from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and value network update routine. \"\"\"", "= value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor ----------- policy_loss = self.value_net.get_probs(state,", "0: try: params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def", "= np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward =", "- 1) self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # Noise process self.noise", "beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max", "name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):", "policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net", "256 self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue", "= torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update critic -------", "while update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue,", "self.tau) + 
param.data * self.tau ) # Send updated learner to the queue", "fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param, param", "= l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected =", "policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(", "torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- # Predict next actions with target policy", "self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1) self.device", "# -------- Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss *", "n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param, param in zip(self.target_value_net.parameters(),", "== 0: try: params = [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass", "update_time = time.time() state, action, reward, next_state, done, gamma, weights, inds = batch", "rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value", "% 1000 == 0: print(\"Training step \", update_step.value) training_on.value = 0 empty_torch_queue(self.learner_w_queue) empty_torch_queue(replay_priority_queue)", "self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if update_step.value % 1000 == 0: 
print(\"Training", "n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims,", "next_state = np.asarray(next_state) done = np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten() state", "* torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for", "priority_epsilon = 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss", "p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue, replay_priority_queue, update_step): while", "if torch.cuda.is_available() else 'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and", "critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities", "Send updated learner to the queue if update_step.value % 100 == 0: try:", "(self.v_max - self.v_min) / (self.num_atoms - 1) self.device = 'cuda' if torch.cuda.is_available() else", "_update_step(self, batch, replay_priority_queue, update_step): update_time = time.time() state, action, reward, next_state, done, gamma,", "# ------- Update critic ------- # Predict next actions with target policy network", "value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state))", "param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data *", "= 
self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities in buffer td_error =", "self.tau ) # Send updated learner to the queue if update_step.value % 100", "# Value and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name,", "as optim import queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from", "np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update step", "action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update", "training_on, batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait() except", "5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value", "value_loss.backward() self.value_optimizer.step() # -------- Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss =", "= target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in", "weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad()", "nn import torch.optim as optim import queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise", "= value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) 
+ priority_epsilon replay_priority_queue.put((inds,", "target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon", "1000 == 0: print(\"Training step \", update_step.value) training_on.value = 0 empty_torch_queue(self.learner_w_queue) empty_torch_queue(replay_priority_queue) print(\"Exit", "----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss,", "d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and value network update routine.", "policy network next_action = self.target_policy_net(next_state) # Predict Z distribution with target value network", "np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done) weights = np.asarray(weights)", "self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps:", "target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau ) # Send", "target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min", "self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max - self.v_min) / (self.num_atoms", "\"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name,", "# Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net =", "lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, 
update_step):", "self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net =", "d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for", "Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss", "= learner_w_queue self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1) self.device =", "= time.time() state, action, reward, next_state, done, gamma, weights, inds = batch state", "self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for", "critic ------- # Predict next actions with target policy network next_action = self.target_policy_net(next_state)", "state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done", "= 0.998 self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue self.delta_z = (self.v_max - self.v_min)", "param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data *", "l2_projection class LearnerD4PG(object): \"\"\"Policy and value network update routine. 
\"\"\" def __init__(self, policy_net,", "n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max = v_max", "v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps", "dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):", "action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update critic", "learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min =", "run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait()", "torch import torch.nn as nn import torch.optim as optim import queue from insomnia.utils", "= torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in", "time import numpy as np import torch import torch.nn as nn import torch.optim", "target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected =", "target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected", "LearnerD4PG(object): \"\"\"Policy and value network update routine. 
\"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue,", "fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims,", "distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z)", "value_loss = value_loss.mean(axis=1) # Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon =", "np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device)", "torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss", "* torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # --------", "= d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net", "* self.tau ) # Send updated learner to the queue if update_step.value %", "value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update))", ") # Send updated learner to the queue if update_step.value % 100 ==", "= self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma", 
"target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data", "except: pass def run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try:", "and value network update routine. \"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta,", "Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net = d4pg.CriticNetwork(beta,", "Predict Z distribution with target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get", "__init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max,", "= policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms)", "insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc", "value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) # Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value,", "== 0: print(\"Training step \", update_step.value) training_on.value = 0 empty_torch_queue(self.learner_w_queue) empty_torch_queue(replay_priority_queue) print(\"Exit learner.\")", "self.v_min = v_min self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps = 10000 self.batch_size", "= 10000 self.batch_size = 256 self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay =", "import GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy", "- self.v_min) / (self.num_atoms - 1) 
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'", "in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer =", "v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device)", "= optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time =", "replay_priority_queue, update_step): update_time = time.time() state, action, reward, next_state, done, gamma, weights, inds", "fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.target_policy_net = target_policy_net for target_param, param in", "step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor ----------- policy_loss", "update_step.value += 1 if update_step.value % 1000 == 0: print(\"Training step \", update_step.value)", "zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(),", "[p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue, replay_priority_queue,", "import numpy as np import torch import torch.nn as nn import torch.optim as", "= torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = 
torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # -------", "state, action, reward, next_state, done, gamma, weights, inds = batch state = np.asarray(state)", "+ param.data * self.tau ) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data", "batch, replay_priority_queue, update_step): update_time = time.time() state, action, reward, next_state, done, gamma, weights,", "reward, next_state, done, gamma, weights, inds = batch state = np.asarray(state) action =", "fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max = v_max self.num_atoms", "empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection", "next_action.detach()) # Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5,", "= np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update", "weights = np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action", "process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims,", "= np.asarray(state) action = np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state) done =", "update_step.value % 1000 == 0: print(\"Training step \", update_step.value) training_on.value = 0 empty_torch_queue(self.learner_w_queue)", "else 'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets", "value_loss = 
self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities in buffer td_error", "import queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import", "= 'cuda' if torch.cuda.is_available() else 'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) #", "= batch state = np.asarray(state) action = np.asarray(action) reward = np.asarray(reward) next_state =", "= torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- # Predict next actions with target", "torch.sum(policy_loss, dim=1) policy_loss = -policy_loss.mean() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(),", "* (1.0 - self.tau) + param.data * self.tau ) for target_param, param in", "self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max,", "import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import", "= np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done) weights =", "GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions,", "# Get projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms,", "self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time", "v_max self.num_atoms = n_atoms 
self.num_train_steps = 10000 self.batch_size = 256 self.tau = 0.001", "zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau )", "def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min,", "-------- Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device)", "v_min self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps = 10000 self.batch_size = 256", "from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection class", "target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def", "priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update =", "target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data)", "optim import queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models", "policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms)", "for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) +", "= 
d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net", "value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() #", "GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and", "= value_loss.mean(axis=1) # Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4", "\"\"\"Policy and value network update routine. \"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha,", "Update priorities in buffer td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update", "target policy network next_action = self.target_policy_net(next_state) # Predict Z distribution with target value", "done = torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- # Predict next actions with", "self.value_optimizer.step() # -------- Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss", "1 if update_step.value % 1000 == 0: print(\"Training step \", update_step.value) training_on.value =", "done = np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state", "insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and value network update routine. 
\"\"\" def", "self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions,", "zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion =", "insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object): \"\"\"Policy and value network", "import torch.optim as optim import queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import", "target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(),", "self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time = time.time() state, action,", "= GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims,", "Value and policy nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min,", "alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min = v_min", "torch.cuda.is_available() else 'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy", "dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value =", "routine. 
\"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims,", "self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0", "self.num_atoms = n_atoms self.num_train_steps = 10000 self.batch_size = 256 self.tau = 0.001 self.gamma", "= 256 self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue =", "v_max, n_atoms=51): self.v_min = v_min self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps =", "to the queue if update_step.value % 100 == 0: try: params = [p.data.cpu().detach().numpy()", "from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg from", "critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss =", "self.tau ) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 -", "= (self.v_max - self.v_min) / (self.num_atoms - 1) self.device = 'cuda' if torch.cuda.is_available()", "** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action)", "n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value =", "torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- # Predict 
next actions", "update_step) update_step.value += 1 if update_step.value % 1000 == 0: print(\"Training step \",", "replay_priority_queue, update_step) update_step.value += 1 if update_step.value % 1000 == 0: print(\"Training step", "import torch.nn as nn import torch.optim as optim import queue from insomnia.utils import", "queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg", "delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss =", "self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value <", "reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update critic ------- # Predict", "np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device)", "self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau ) #", "= batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if update_step.value", "batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if", "self.delta_z = (self.v_max - self.v_min) / (self.num_atoms - 1) self.device = 'cuda' if", "torch.tensor(weights).float().to(self.device) # Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- 
Update", "# Predict Z distribution with target value network target_value = self.target_value_net.get_probs(next_state, next_action.detach()) #", "= critic_value.to(self.device) value_loss = self.value_criterion(critic_value, target_z_projected) value_loss = value_loss.mean(axis=1) # Update priorities in", "optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time = time.time()", "np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state = torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device)", "class LearnerD4PG(object): \"\"\"Policy and value network update routine. \"\"\" def __init__(self, policy_net, target_policy_net,", "in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau", "d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net = policy_net self.target_value_net", "the queue if update_step.value % 100 == 0: try: params = [p.data.cpu().detach().numpy() for", "network update routine. 
\"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions,", "weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) #", "batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if update_step.value %", "l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device)", "- self.tau) + param.data * self.tau ) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):", "torch.from_numpy(next_state).float().to(self.device) action = torch.from_numpy(action).float().to(self.device) reward = torch.from_numpy(reward).float().to(self.device) done = torch.from_numpy(done).float().to(self.device) # ------- Update", "td_error = value_loss.cpu().detach().numpy().flatten() priority_epsilon = 1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon", "torch.optim as optim import queue from insomnia.utils import empty_torch_queue from insomnia.explores.gaussian_noise import GaussianActionNoise", "self.tau) + param.data * self.tau ) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(", "10000 self.batch_size = 256 self.tau = 0.001 self.gamma = 0.998 self.prioritized_replay = 0", "update routine. 
\"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims,", "actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss =", "next_state, done, gamma, weights, inds = batch state = np.asarray(state) action = np.asarray(action)", "self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion = nn.BCELoss(reduction='none')", "reward = np.asarray(reward) next_state = np.asarray(next_state) done = np.asarray(done) weights = np.asarray(weights) inds", "= np.asarray(done) weights = np.asarray(weights) inds = np.asarray(inds).flatten() state = torch.from_numpy(state).float().to(self.device) next_state =", "fc2_dims, name, v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max = v_max self.num_atoms =", "1e-4 if self.prioritized_replay: weights_update = np.abs(td_error) + priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss", "actions with target policy network next_action = self.target_policy_net(next_state) # Predict Z distribution with", "target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data", ") for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau)", "for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):", "in zip(self.target_policy_net.parameters(), 
self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=beta) self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=alpha) self.value_criterion", "v_max=self.v_max, delta_z=self.delta_z) target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss", "insomnia.explores.gaussian_noise import GaussianActionNoise from insomnia.numeric_models import d4pg from insomnia.numeric_models.misc import l2_projection class LearnerD4PG(object):", "# Update step value_loss = value_loss.mean() self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor", "in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue, replay_priority_queue, update_step): while update_step.value", "= [p.data.cpu().detach().numpy() for p in self.policy_net.parameters()] self.learner_w_queue.put(params) except: pass def run(self, training_on, batch_queue,", "/ (self.num_atoms - 1) self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # Noise", "batch state = np.asarray(state) action = np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state)", "* self.tau ) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_( target_param.data * (1.0", "= nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time = time.time() state, action, reward,", "Update critic ------- # Predict next actions with target policy network next_action =", "'cuda' if torch.cuda.is_available() else 'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value", "value network update routine. 
\"\"\" def __init__(self, policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims,", "projected distribution target_z_projected = l2_projection._l2_project(next_distr_v=target_value, rewards_v=reward, dones_mask_t=done, gamma=self.gamma ** 5, n_atoms=self.num_atoms, v_min=self.v_min, v_max=self.v_max,", "continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1 if update_step.value % 1000 == 0:", "batch_queue, replay_priority_queue, update_step): while update_step.value < self.num_train_steps: try: batch = batch_queue.get_nowait() except queue.Empty:", "nn.BCELoss(reduction='none') def _update_step(self, batch, replay_priority_queue, update_step): update_time = time.time() state, action, reward, next_state,", "priority_epsilon replay_priority_queue.put((inds, weights_update)) value_loss = value_loss * torch.tensor(weights).float().to(self.device) # Update step value_loss =", "policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1)", "target_z_projected = torch.from_numpy(target_z_projected).float().to(self.device) critic_value = self.value_net.get_probs(state, action) critic_value = critic_value.to(self.device) value_loss = self.value_criterion(critic_value,", "network next_action = self.target_policy_net(next_state) # Predict Z distribution with target value network target_value", "self.value_optimizer.zero_grad() value_loss.backward() self.value_optimizer.step() # -------- Update actor ----------- policy_loss = self.value_net.get_probs(state, self.policy_net(state)) policy_loss", "= v_min self.v_max = v_max self.num_atoms = n_atoms self.num_train_steps = 10000 self.batch_size =", "state = np.asarray(state) action = np.asarray(action) reward = np.asarray(reward) next_state = np.asarray(next_state) done", "def _update_step(self, batch, replay_priority_queue, 
update_step): update_time = time.time() state, action, reward, next_state, done,", "param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()): target_param.data.copy_(param.data) self.value_optimizer", "self.policy_optimizer.step() for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau)", "self.value_net.parameters()): target_param.data.copy_( target_param.data * (1.0 - self.tau) + param.data * self.tau ) for", "= 0.001 self.gamma = 0.998 self.prioritized_replay = 0 self.learner_w_queue = learner_w_queue self.delta_z =", "try: batch = batch_queue.get_nowait() except queue.Empty: continue self._update_step(batch, replay_priority_queue, update_step) update_step.value += 1", "self.num_atoms) self.target_policy_net = target_policy_net for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()): target_param.data.copy_(param.data) for target_param,", "name, v_min, v_max, n_atoms=51): self.v_min = v_min self.v_max = v_max self.num_atoms = n_atoms", "if update_step.value % 100 == 0: try: params = [p.data.cpu().detach().numpy() for p in", "policy_net, target_policy_net, learner_w_queue, alpha, beta, input_dims, n_actions, fc1_dims, fc2_dims, name, v_min, v_max, n_atoms=51):", "= self.value_net.get_probs(state, self.policy_net(state)) policy_loss = policy_loss * torch.from_numpy(self.value_net.z_atoms).float().to(self.device) policy_loss = torch.sum(policy_loss, dim=1) policy_loss", "'cpu' # Noise process self.noise = GaussianActionNoise(mu=np.zeros(n_actions)) # Value and policy nets self.value_net", "nets self.value_net = d4pg.CriticNetwork(beta, input_dims, fc1_dims, fc2_dims, n_actions, name, self.v_min, self.v_max, self.num_atoms) self.policy_net", "learner to the queue if 
update_step.value % 100 == 0: try: params =", "1) self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # Noise process self.noise =" ]
[ "= pywintypes.TimeType self._client = OpenOPC.client() self._client.connect(opc_server_name) self._print('connected to OPC server ' + opc_server_name)", "continue to use for name, value, quality, timeRecord in self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name,", "= dict_with_tuple_keys def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name, codes_dict in self._dict_opc_names_codes.items(): dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)]", "connection') def write(self, list_data_string): try: self._client.write(list_data_string) except OpenOPC.TimeoutError: self._print(\"Timeout error OPC occured\") def", "server self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) # get dictionaries of tag codes and their", "= opc_name self._dict_code_keys_opc_names = dict_with_tuple_keys def get_codes_plotting_names_dict(self): return self._dict_codes_plotting_names def get_opc_names_codes_dict(self): return self._dict_opc_names_codes", "self._set_dict_code_keys_opc_names() self._set_parameters_name_string() def __enter__(self): return self def __exit__(self, *args): self.close() def close(self): groups", "1 times the values and determine the group of opc tags, which will", "list_opc_values.append((opc_tag_name, cur_value)) if self._debug or self._verbose: self._print((opc_tag_name, cur_value, cur_time)) return list_opc_values def _set_opc_client(self,", "self._dict_codes_plotting_names = dict_with_tuple_keys def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name, codes_dict in self._dict_opc_names_codes.items():", "get a list of all parameter names from the OPC server self._param_list =", "self._dict_codes_plotting_names def get_opc_names_codes_dict(self): return self._dict_opc_names_codes def _get_dict_from_opc_data(self, parameter_name, value, current_date_string): dict_param_value = 
{**self._dict_opc_names_codes.get(parameter_name),", "__enter__(self): return self def __exit__(self, *args): self.close() def close(self): groups = self._client.groups() self._client.remove(copy.deepcopy(list(groups)))", "'value': value, 'time': current_date_string} return dict_param_value @staticmethod def _get_sorted_tuple_values_from_dict(_dict): values_list = list() for", "logs_file_path = conf_settings['logs_file_path'] if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log'))) debug_level_string", "= value_dict.pop('time', None) cur_value = value_dict.pop('value', None) opc_tag_name = self._get_opc_tag_name(value_dict) list_opc_values.append((opc_tag_name, cur_value)) if", "self._print('connected to OPC server ' + opc_server_name) def _set_logger(self, conf_settings): if self._debug: logs_file_path", "is set by default ' + str(self._frequency)) else: self._frequency = frequency def get_frequency(self):", "= json.load(read_file) return tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names", "client close the connection') def write(self, list_data_string): try: self._client.write(list_data_string) except OpenOPC.TimeoutError: self._print(\"Timeout error", "OpenOPC import pywintypes import datetime from builtins import print import json import copy", "= conf_settings['debug'] self._set_logger(conf_settings) self._verbose = conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server']) # get a list of", "error OPC occured\") def _print(self, message): if self._verbose: print(message) if self._debug: self._logger.info(message) def", "list_opc_values = list() for value_dict in current_values_list: cur_time = value_dict.pop('time', None) cur_value =", "= None self._dict_code_keys_opc_names = None 
self._parameters_name_string = None self._debug = conf_settings['debug'] self._set_logger(conf_settings) self._verbose", "def __enter__(self): return self def __exit__(self, *args): self.close() def close(self): groups = self._client.groups()", "self._client.groups(): # Read 1 times the values and determine the group of opc", "# TODO in production, preferably an HTTP request tags_settings_file_path = conf_settings['tags_settings_file_path'] if not", "has been read from the OPC') for item in param_array: self._print(item) except OpenOPC.TimeoutError:", "for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict in list_codes_plotting_names: dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])] = code_plotting_name_dict['value']", "coding: UTF-8 -*- import OpenOPC import pywintypes import datetime from builtins import print", "self._client.groups() self._client.remove(copy.deepcopy(list(groups))) self._client.close() self._print('OPC client close the connection') def write(self, list_data_string): try: self._client.write(list_data_string)", "None self._parameters_name_string = None self._debug = conf_settings['debug'] self._set_logger(conf_settings) self._verbose = conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server'])", "else: debug_level = logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s %(name)s %(levelname)s:%(message)s', filename=logs_file_path) self._logger = logging.getLogger(__name__) def", "dict() for opc_name, codes_dict in self._dict_opc_names_codes.items(): dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)] = opc_name self._dict_code_keys_opc_names = dict_with_tuple_keys def", "if self._debug: logs_file_path = conf_settings['logs_file_path'] if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)),", 
"_get_settings_dicts(conf_settings): # TODO in production, preferably an HTTP request tags_settings_file_path = conf_settings['tags_settings_file_path'] if", "self._debug = None self._logger = None self._verbose = None self._frequency = None self._client", "value, 'time': current_date_string} return dict_param_value @staticmethod def _get_sorted_tuple_values_from_dict(_dict): values_list = list() for k", "values_list.append(_dict[k]) return tuple(values_list) def _get_opc_tag_name(self, value_dict): keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict) return self._dict_code_keys_opc_names.get(keys_tuple) def _set_parameters_name_string(self):", "the values and determine the group of opc tags, which will continue to", "sorted(_dict.keys()): values_list.append(_dict[k]) return tuple(values_list) def _get_opc_tag_name(self, value_dict): keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict) return self._dict_code_keys_opc_names.get(keys_tuple) def", "value_dict.pop('time', None) cur_value = value_dict.pop('value', None) opc_tag_name = self._get_opc_tag_name(value_dict) list_opc_values.append((opc_tag_name, cur_value)) if self._debug", "name, value, quality, timeRecord in self._client.iread(group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) if self._debug or", "write(self, list_data_string): try: self._client.write(list_data_string) except OpenOPC.TimeoutError: self._print(\"Timeout error OPC occured\") def _print(self, message):", "determine the group of opc tags, which will continue to use for name,", "= 5 self._print('data refresh rate is set by default ' + str(self._frequency)) else:", "self._verbose = conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server']) # get a list of all parameter names", "try: self._client.write(list_data_string) except OpenOPC.TimeoutError: self._print(\"Timeout error OPC occured\") def _print(self, 
message): if self._verbose:", "param_array def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for value_dict in current_values_list: cur_time =", "if not tags_settings_file_path: tags_settings_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/tags_settings_sample.json'))) with open(tags_settings_file_path, 'r') as", "None self._dict_opc_names_codes = None self._dict_code_keys_opc_names = None self._parameters_name_string = None self._debug = conf_settings['debug']", "= conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server']) # get a list of all parameter names from", "conf_settings['debug_level'] if debug_level_string: debug_level = logging.getLevelName(debug_level_string) else: debug_level = logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s %(name)s", "return param_array def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for value_dict in current_values_list: cur_time", "= logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s %(name)s %(levelname)s:%(message)s', filename=logs_file_path) self._logger = logging.getLogger(__name__) def _set_frequency(self, conf_settings):", "the OPC server self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) # get dictionaries of tag codes", "self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time' else: self._parameters_name_string = '' def get_parameters_name_string(self): return self._parameters_name_string", "for k in sorted(_dict.keys()): values_list.append(_dict[k]) return tuple(values_list) def _get_opc_tag_name(self, value_dict): keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict)", "group of opc tags, which will continue to use for name, value, quality,", "names tags_settings_dicts = 
self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names']) self._dict_opc_names_codes = tags_settings_dicts['opc_names_and_codes'] self._set_dict_code_keys_opc_names() self._set_parameters_name_string() def __enter__(self): return", "= conf_settings['logs_file_path'] if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log'))) debug_level_string =", "if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log'))) debug_level_string = conf_settings['debug_level'] if", "def _print(self, message): if self._verbose: print(message) if self._debug: self._logger.info(message) def get_list_of_current_values(self): current_date_string =", "tags_settings_dicts = json.load(read_file) return tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name,", "= None self._dict_codes_plotting_names = None self._dict_opc_names_codes = None self._dict_code_keys_opc_names = None self._parameters_name_string =", "groups = self._client.groups() self._client.remove(copy.deepcopy(list(groups))) self._client.close() self._print('OPC client close the connection') def write(self, list_data_string):", "which will continue to use for name, value, quality, timeRecord in self._client.iread(self._param_list, group='Group0',", "code_plotting_name_dict['value'] self._dict_codes_plotting_names = dict_with_tuple_keys def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name, codes_dict in", "def write(self, list_data_string): try: self._client.write(list_data_string) except OpenOPC.TimeoutError: self._print(\"Timeout error OPC occured\") def _print(self,", "opc tags, which will continue to use for name, value, quality, timeRecord in", "set by default ' + 
str(self._frequency)) else: self._frequency = frequency def get_frequency(self): return", "tags_settings_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/tags_settings_sample.json'))) with open(tags_settings_file_path, 'r') as read_file: tags_settings_dicts =", "_set_parameters_name_string(self): if self._dict_opc_names_codes: dict_codes_first_value = next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time' else: self._parameters_name_string", "= list() for k in sorted(_dict.keys()): values_list.append(_dict[k]) return tuple(values_list) def _get_opc_tag_name(self, value_dict): keys_tuple", "_set_opc_client(self, opc_server_name): pywintypes.datetime = pywintypes.TimeType self._client = OpenOPC.client() self._client.connect(opc_server_name) self._print('connected to OPC server", "update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for name, value, quality, timeRecord in self._client.iread(group='Group0', update=1):", "next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time' else: self._parameters_name_string = '' def get_parameters_name_string(self): return", "in self._client.iread(group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) if self._debug or self._verbose: self._print('Data has been", "return self def __exit__(self, *args): self.close() def close(self): groups = self._client.groups() self._client.remove(copy.deepcopy(list(groups))) self._client.close()", "ConnectionOPC: def __init__(self, conf_settings): self._debug = None self._logger = None self._verbose = None", "get_frequency(self): return self._frequency @staticmethod def _get_settings_dicts(conf_settings): # TODO in production, preferably an HTTP", 
"pywintypes.datetime = pywintypes.TimeType self._client = OpenOPC.client() self._client.connect(opc_server_name) self._print('connected to OPC server ' +", "tags_settings_dicts['opc_names_and_codes'] self._set_dict_code_keys_opc_names() self._set_parameters_name_string() def __enter__(self): return self def __exit__(self, *args): self.close() def close(self):", "in sorted(_dict.keys()): values_list.append(_dict[k]) return tuple(values_list) def _get_opc_tag_name(self, value_dict): keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict) return self._dict_code_keys_opc_names.get(keys_tuple)", "def _set_parameters_name_string(self): if self._dict_opc_names_codes: dict_codes_first_value = next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time' else:", "-*- import OpenOPC import pywintypes import datetime from builtins import print import json", "self._frequency @staticmethod def _get_settings_dicts(conf_settings): # TODO in production, preferably an HTTP request tags_settings_file_path", "will continue to use for name, value, quality, timeRecord in self._client.iread(self._param_list, group='Group0', update=1):", "convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for value_dict in current_values_list: cur_time = value_dict.pop('time', None)", "value_dict.pop('value', None) opc_tag_name = self._get_opc_tag_name(value_dict) list_opc_values.append((opc_tag_name, cur_value)) if self._debug or self._verbose: self._print((opc_tag_name, cur_value,", "list() for value_dict in current_values_list: cur_time = value_dict.pop('time', None) cur_value = value_dict.pop('value', None)", "def _set_logger(self, conf_settings): if self._debug: logs_file_path = conf_settings['logs_file_path'] if not logs_file_path: logs_file_path =", "'r') as read_file: tags_settings_dicts = json.load(read_file) return tags_settings_dicts def 
_set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys =", "current_date_string = datetime.datetime.now() param_array = list() try: if not self._client.groups(): # Read 1", "import copy import logging import os class ConnectionOPC: def __init__(self, conf_settings): self._debug =", "def get_opc_names_codes_dict(self): return self._dict_opc_names_codes def _get_dict_from_opc_data(self, parameter_name, value, current_date_string): dict_param_value = {**self._dict_opc_names_codes.get(parameter_name), 'value':", "= None self._debug = conf_settings['debug'] self._set_logger(conf_settings) self._verbose = conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server']) # get", "def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for value_dict in current_values_list: cur_time = value_dict.pop('time',", "and their OPC names tags_settings_dicts = self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names']) self._dict_opc_names_codes = tags_settings_dicts['opc_names_and_codes'] self._set_dict_code_keys_opc_names() self._set_parameters_name_string()", "_get_dict_from_opc_data(self, parameter_name, value, current_date_string): dict_param_value = {**self._dict_opc_names_codes.get(parameter_name), 'value': value, 'time': current_date_string} return dict_param_value", "use for name, value, quality, timeRecord in self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string))", "dictionaries of tag codes and their OPC names tags_settings_dicts = self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names']) self._dict_opc_names_codes", "print(message) if self._debug: self._logger.info(message) def get_list_of_current_values(self): current_date_string = 
datetime.datetime.now() param_array = list() try:", "= list() for value_dict in current_values_list: cur_time = value_dict.pop('time', None) cur_value = value_dict.pop('value',", "self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) # get dictionaries of tag codes and their OPC", "pywintypes.TimeType self._client = OpenOPC.client() self._client.connect(opc_server_name) self._print('connected to OPC server ' + opc_server_name) def", "self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for name, value, quality, timeRecord in", "cur_time)) return list_opc_values def _set_opc_client(self, opc_server_name): pywintypes.datetime = pywintypes.TimeType self._client = OpenOPC.client() self._client.connect(opc_server_name)", "json import copy import logging import os class ConnectionOPC: def __init__(self, conf_settings): self._debug", "self._verbose: self._print((opc_tag_name, cur_value, cur_time)) return list_opc_values def _set_opc_client(self, opc_server_name): pywintypes.datetime = pywintypes.TimeType self._client", "name, value, quality, timeRecord in self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for", "= self._client.groups() self._client.remove(copy.deepcopy(list(groups))) self._client.close() self._print('OPC client close the connection') def write(self, list_data_string): try:", "cur_value, cur_time)) return list_opc_values def _set_opc_client(self, opc_server_name): pywintypes.datetime = pywintypes.TimeType self._client = OpenOPC.client()", "conf_settings): frequency = conf_settings['frequency'] if frequency is None: self._frequency = 5 self._print('data refresh", "else: for name, value, quality, timeRecord in self._client.iread(group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, 
current_date_string)) if", "self._print(\"Timeout error OPC occured\") def _print(self, message): if self._verbose: print(message) if self._debug: self._logger.info(message)", "logs_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log'))) debug_level_string = conf_settings['debug_level'] if debug_level_string: debug_level =", "param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for name, value, quality, timeRecord in self._client.iread(group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name,", "Read 1 times the values and determine the group of opc tags, which", "TimeoutError occured\") return param_array def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for value_dict in", "= tags_settings_dicts['opc_names_and_codes'] self._set_dict_code_keys_opc_names() self._set_parameters_name_string() def __enter__(self): return self def __exit__(self, *args): self.close() def", "dict_with_tuple_keys def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name, codes_dict in self._dict_opc_names_codes.items(): dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)] =", "conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server']) # get a list of all parameter names from the", "' + str(self._frequency)) else: self._frequency = frequency def get_frequency(self): return self._frequency @staticmethod def", "except OpenOPC.TimeoutError: self._print(\"OPC TimeoutError occured\") return param_array def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list()", "os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/tags_settings_sample.json'))) with open(tags_settings_file_path, 'r') as read_file: tags_settings_dicts = json.load(read_file) return tags_settings_dicts def", "datetime 
class ConnectionOPC:
    """Thin wrapper around an OpenOPC DA client.

    Connects to the configured OPC server, discovers every tag under a
    configured branch, and translates between OPC tag names and the
    internal code dictionaries used for plotting/simulation data.
    Supports use as a context manager (``with ConnectionOPC(cfg) as c:``).
    """

    def __init__(self, conf_settings):
        """Connect to the OPC server and build the tag lookup tables.

        :param conf_settings: dict of settings; keys used here:
            'debug', 'verbose', 'frequency', 'opc_server',
            'tags_branch_opc_server', 'logs_file_path', 'debug_level',
            'tags_settings_file_path'.
        """
        # Declare all instance attributes up front so the object shape is
        # documented even before the setup helpers below populate them.
        self._debug = None              # bool: mirror log output to file logger
        self._logger = None             # logging.Logger, only when debug is on
        self._verbose = None            # bool: echo messages to stdout
        self._frequency = None          # int: data refresh rate (seconds, presumably)
        self._client = None             # OpenOPC client instance
        self._param_list = None         # list of OPC tag names under the branch
        self._dict_codes_plotting_names = None   # (tag, key) -> plotting name
        self._dict_opc_names_codes = None        # opc name -> code dict
        self._dict_code_keys_opc_names = None    # sorted code values tuple -> opc name
        self._parameters_name_string = None      # CSV header-style name string
        self._debug = conf_settings['debug']
        self._set_logger(conf_settings)
        self._verbose = conf_settings['verbose']
        self._set_frequency(conf_settings)
        self._set_opc_client(conf_settings['opc_server'])
        # get a list of all parameter names from the OPC server
        self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True)
        # get dictionaries of tag codes and their OPC names
        tags_settings_dicts = self._get_settings_dicts(conf_settings)
        self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names'])
        self._dict_opc_names_codes = tags_settings_dicts['opc_names_and_codes']
        self._set_dict_code_keys_opc_names()
        self._set_parameters_name_string()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def close(self):
        """Remove all server-side groups and close the OPC connection."""
        groups = self._client.groups()
        # list() already makes an independent copy of the group names;
        # no deep copy is needed for a flat list of strings.
        self._client.remove(list(groups))
        self._client.close()
        self._print('OPC client close the connection')

    def write(self, list_data_string):
        """Write a list of (tag, value) pairs to the OPC server.

        Timeouts are reported via ``_print`` instead of propagating.
        """
        try:
            self._client.write(list_data_string)
        except OpenOPC.TimeoutError:
            self._print("Timeout error OPC occured")

    def _print(self, message):
        """Emit *message* to stdout (verbose) and/or the file logger (debug)."""
        if self._verbose:
            print(message)
        if self._debug:
            self._logger.info(message)

    def get_list_of_current_values(self):
        """Read the current value of every discovered tag.

        :return: list of dicts, one per tag, each merging the tag's code
            dictionary with its current 'value' and a 'time' stamp taken
            once for the whole read.  Empty list on OPC timeout.
        """
        current_date_string = datetime.datetime.now()
        param_array = list()
        try:
            if not self._client.groups():
                # First read: pass the explicit tag list so the server
                # creates 'Group0'; subsequent reads reuse that group.
                reader = self._client.iread(self._param_list, group='Group0', update=1)
            else:
                reader = self._client.iread(group='Group0', update=1)
            # Single loop replaces the previously duplicated per-branch loops.
            for name, value, quality, time_record in reader:
                param_array.append(self._get_dict_from_opc_data(name, value, current_date_string))
            if self._debug or self._verbose:
                self._print('Data has been read from the OPC')
                for item in param_array:
                    self._print(item)
        except OpenOPC.TimeoutError:
            self._print("OPC TimeoutError occured")
        return param_array

    def convert_simulation_data_to_opc_data(self, current_values_list):
        """Convert simulation value dicts into (opc_tag_name, value) pairs.

        NOTE: each input dict is mutated — its 'time' and 'value' keys are
        popped so the remaining keys form the code-lookup tuple.
        """
        list_opc_values = list()
        for value_dict in current_values_list:
            cur_time = value_dict.pop('time', None)
            cur_value = value_dict.pop('value', None)
            opc_tag_name = self._get_opc_tag_name(value_dict)
            list_opc_values.append((opc_tag_name, cur_value))
            if self._debug or self._verbose:
                self._print((opc_tag_name, cur_value, cur_time))
        return list_opc_values

    def _set_opc_client(self, opc_server_name):
        """Create the OpenOPC client and connect it to *opc_server_name*."""
        # Work around pywin32 time handling expected by OpenOPC.
        pywintypes.datetime = pywintypes.TimeType
        self._client = OpenOPC.client()
        self._client.connect(opc_server_name)
        self._print('connected to OPC server ' + opc_server_name)

    def _set_logger(self, conf_settings):
        """Configure file logging when debug mode is enabled."""
        if self._debug:
            logs_file_path = conf_settings['logs_file_path']
            if not logs_file_path:
                # Default log location relative to this module.
                logs_file_path = os.path.abspath(
                    os.path.realpath(
                        os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log')))
            debug_level_string = conf_settings['debug_level']
            if debug_level_string:
                debug_level = logging.getLevelName(debug_level_string)
            else:
                debug_level = logging.DEBUG
            logging.basicConfig(level=debug_level,
                                format='%(asctime)s %(name)s %(levelname)s:%(message)s',
                                filename=logs_file_path)
            self._logger = logging.getLogger(__name__)

    def _set_frequency(self, conf_settings):
        """Set the data refresh rate; defaults to 5 when unset."""
        frequency = conf_settings['frequency']
        if frequency is None:
            self._frequency = 5
            self._print('data refresh rate is set by default ' + str(self._frequency))
        else:
            self._frequency = frequency

    def get_frequency(self):
        return self._frequency

    @staticmethod
    def _get_settings_dicts(conf_settings):
        """Load the tag-settings dictionaries from the configured JSON file."""
        # TODO in production, preferably an HTTP request
        tags_settings_file_path = conf_settings['tags_settings_file_path']
        if not tags_settings_file_path:
            # Fall back to the sample settings shipped with the project.
            tags_settings_file_path = os.path.abspath(
                os.path.realpath(
                    os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/tags_settings_sample.json')))
        # Explicit encoding: JSON is defined as UTF-8; don't depend on locale.
        with open(tags_settings_file_path, 'r', encoding='utf-8') as read_file:
            tags_settings_dicts = json.load(read_file)
        return tags_settings_dicts

    def _set_dict_codes_plotting_names(self, dict_codes_plotting_names):
        """Flatten the settings into a {(tag, key): plotting_name} dict."""
        dict_with_tuple_keys = dict()
        for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items():
            for code_plotting_name_dict in list_codes_plotting_names:
                dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])] = code_plotting_name_dict['value']
        self._dict_codes_plotting_names = dict_with_tuple_keys

    def _set_dict_code_keys_opc_names(self):
        """Build the reverse lookup: sorted code-values tuple -> OPC name."""
        dict_with_tuple_keys = dict()
        for opc_name, codes_dict in self._dict_opc_names_codes.items():
            dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)] = opc_name
        self._dict_code_keys_opc_names = dict_with_tuple_keys

    def get_codes_plotting_names_dict(self):
        return self._dict_codes_plotting_names

    def get_opc_names_codes_dict(self):
        return self._dict_opc_names_codes

    def _get_dict_from_opc_data(self, parameter_name, value, current_date_string):
        """Merge a tag's code dict with its current value and timestamp."""
        # Fall back to an empty code dict for tags missing from the settings
        # file instead of crashing with TypeError on `**None`.
        codes = self._dict_opc_names_codes.get(parameter_name) or {}
        return {**codes, 'value': value, 'time': current_date_string}

    @staticmethod
    def _get_sorted_tuple_values_from_dict(_dict):
        """Return the dict's values as a tuple, ordered by sorted key."""
        return tuple(_dict[k] for k in sorted(_dict))

    def _get_opc_tag_name(self, value_dict):
        """Look up the OPC tag name for a simulation code dict (or None)."""
        keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict)
        return self._dict_code_keys_opc_names.get(keys_tuple)

    def _set_parameters_name_string(self):
        """Build the comma-joined parameter-name header string."""
        if self._dict_opc_names_codes:
            dict_codes_first_value = next(iter(self._dict_opc_names_codes.values()))
            self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time'
        else:
            self._parameters_name_string = ''
values_list.append(_dict[k]) return tuple(values_list) def", "self._client = None self._param_list = None self._dict_codes_plotting_names = None self._dict_opc_names_codes = None self._dict_code_keys_opc_names", "current_date_string): dict_param_value = {**self._dict_opc_names_codes.get(parameter_name), 'value': value, 'time': current_date_string} return dict_param_value @staticmethod def _get_sorted_tuple_values_from_dict(_dict):", "self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) # get dictionaries of tag codes and their OPC names tags_settings_dicts", "tags, which will continue to use for name, value, quality, timeRecord in self._client.iread(self._param_list,", "_set_frequency(self, conf_settings): frequency = conf_settings['frequency'] if frequency is None: self._frequency = 5 self._print('data", "return tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items():", "5 self._print('data refresh rate is set by default ' + str(self._frequency)) else: self._frequency", "json.load(read_file) return tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names in", "= logging.getLevelName(debug_level_string) else: debug_level = logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s %(name)s %(levelname)s:%(message)s', filename=logs_file_path) self._logger =", "debug_level = logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s %(name)s %(levelname)s:%(message)s', filename=logs_file_path) self._logger = logging.getLogger(__name__) def _set_frequency(self,", "as read_file: tags_settings_dicts = json.load(read_file) return tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict()", "in current_values_list: cur_time = 
value_dict.pop('time', None) cur_value = value_dict.pop('value', None) opc_tag_name = self._get_opc_tag_name(value_dict)", "dict_codes_first_value = next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time' else: self._parameters_name_string = '' def", "timeRecord in self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for name, value, quality,", "<reponame>Shanginre/OPCDataTransfer<filename>OPCDataTransfer/OPC/OPC.py #!/usr/bin/env python3.6 # -*- coding: UTF-8 -*- import OpenOPC import pywintypes import", "{**self._dict_opc_names_codes.get(parameter_name), 'value': value, 'time': current_date_string} return dict_param_value @staticmethod def _get_sorted_tuple_values_from_dict(_dict): values_list = list()", "conf_settings): self._debug = None self._logger = None self._verbose = None self._frequency = None", "conf_settings): if self._debug: logs_file_path = conf_settings['logs_file_path'] if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath(", "value, quality, timeRecord in self._client.iread(group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) if self._debug or self._verbose:", "import os class ConnectionOPC: def __init__(self, conf_settings): self._debug = None self._logger = None", "self._print('data refresh rate is set by default ' + str(self._frequency)) else: self._frequency =", "tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict in list_codes_plotting_names: dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])] = code_plotting_name_dict['value'] self._dict_codes_plotting_names", "get_opc_names_codes_dict(self): return self._dict_opc_names_codes def _get_dict_from_opc_data(self, parameter_name, value, current_date_string): dict_param_value = 
{**self._dict_opc_names_codes.get(parameter_name), 'value': value,", "times the values and determine the group of opc tags, which will continue", "os class ConnectionOPC: def __init__(self, conf_settings): self._debug = None self._logger = None self._verbose", "= next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time' else: self._parameters_name_string = '' def get_parameters_name_string(self):", "get_list_of_current_values(self): current_date_string = datetime.datetime.now() param_array = list() try: if not self._client.groups(): # Read", "opc_tag_name = self._get_opc_tag_name(value_dict) list_opc_values.append((opc_tag_name, cur_value)) if self._debug or self._verbose: self._print((opc_tag_name, cur_value, cur_time)) return", "list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict in list_codes_plotting_names: dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])] = code_plotting_name_dict['value'] self._dict_codes_plotting_names =", "code_plotting_name_dict['key'])] = code_plotting_name_dict['value'] self._dict_codes_plotting_names = dict_with_tuple_keys def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name,", "__init__(self, conf_settings): self._debug = None self._logger = None self._verbose = None self._frequency =", "= code_plotting_name_dict['value'] self._dict_codes_plotting_names = dict_with_tuple_keys def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name, codes_dict", "self._print(\"OPC TimeoutError occured\") return param_array def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for value_dict", "dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict in list_codes_plotting_names:", "and determine the group of opc 
tags, which will continue to use for", "logging.getLevelName(debug_level_string) else: debug_level = logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s %(name)s %(levelname)s:%(message)s', filename=logs_file_path) self._logger = logging.getLogger(__name__)", "close(self): groups = self._client.groups() self._client.remove(copy.deepcopy(list(groups))) self._client.close() self._print('OPC client close the connection') def write(self,", "try: if not self._client.groups(): # Read 1 times the values and determine the", "in production, preferably an HTTP request tags_settings_file_path = conf_settings['tags_settings_file_path'] if not tags_settings_file_path: tags_settings_file_path", "-*- coding: UTF-8 -*- import OpenOPC import pywintypes import datetime from builtins import", "= None self._logger = None self._verbose = None self._frequency = None self._client =", "_set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict in", "parameter names from the OPC server self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) # get dictionaries", "frequency = conf_settings['frequency'] if frequency is None: self._frequency = 5 self._print('data refresh rate", "for item in param_array: self._print(item) except OpenOPC.TimeoutError: self._print(\"OPC TimeoutError occured\") return param_array def", "names from the OPC server self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) # get dictionaries of", "import print import json import copy import logging import os class ConnectionOPC: def", "None self._verbose = None self._frequency = None self._client = None self._param_list = None", "= None self._verbose = None self._frequency = None self._client = None self._param_list =", "param_array.append(self._get_dict_from_opc_data(name, value, 
current_date_string)) if self._debug or self._verbose: self._print('Data has been read from the", "dict_with_tuple_keys def get_codes_plotting_names_dict(self): return self._dict_codes_plotting_names def get_opc_names_codes_dict(self): return self._dict_opc_names_codes def _get_dict_from_opc_data(self, parameter_name, value,", "message): if self._verbose: print(message) if self._debug: self._logger.info(message) def get_list_of_current_values(self): current_date_string = datetime.datetime.now() param_array", "of all parameter names from the OPC server self._param_list = self._client.list(conf_settings['tags_branch_opc_server'], recursive=True) #", "OPC') for item in param_array: self._print(item) except OpenOPC.TimeoutError: self._print(\"OPC TimeoutError occured\") return param_array", "pywintypes import datetime from builtins import print import json import copy import logging", "opc_server_name) def _set_logger(self, conf_settings): if self._debug: logs_file_path = conf_settings['logs_file_path'] if not logs_file_path: logs_file_path", "open(tags_settings_file_path, 'r') as read_file: tags_settings_dicts = json.load(read_file) return tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys", "tags_settings_dicts def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for", "OpenOPC.TimeoutError: self._print(\"OPC TimeoutError occured\") return param_array def convert_simulation_data_to_opc_data(self, current_values_list): list_opc_values = list() for", "None self._param_list = None self._dict_codes_plotting_names = None self._dict_opc_names_codes = None self._dict_code_keys_opc_names = None", "def _set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for opc_name, codes_dict in self._dict_opc_names_codes.items(): 
dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)] = opc_name", "conf_settings['debug'] self._set_logger(conf_settings) self._verbose = conf_settings['verbose'] self._set_frequency(conf_settings) self._set_opc_client(conf_settings['opc_server']) # get a list of all", "+ str(self._frequency)) else: self._frequency = frequency def get_frequency(self): return self._frequency @staticmethod def _get_settings_dicts(conf_settings):", "production, preferably an HTTP request tags_settings_file_path = conf_settings['tags_settings_file_path'] if not tags_settings_file_path: tags_settings_file_path =", "import datetime from builtins import print import json import copy import logging import", "self._client.connect(opc_server_name) self._print('connected to OPC server ' + opc_server_name) def _set_logger(self, conf_settings): if self._debug:", "= logging.getLogger(__name__) def _set_frequency(self, conf_settings): frequency = conf_settings['frequency'] if frequency is None: self._frequency", "'time': current_date_string} return dict_param_value @staticmethod def _get_sorted_tuple_values_from_dict(_dict): values_list = list() for k in", "dict_with_tuple_keys[self._get_sorted_tuple_values_from_dict(codes_dict)] = opc_name self._dict_code_keys_opc_names = dict_with_tuple_keys def get_codes_plotting_names_dict(self): return self._dict_codes_plotting_names def get_opc_names_codes_dict(self): return", "from the OPC') for item in param_array: self._print(item) except OpenOPC.TimeoutError: self._print(\"OPC TimeoutError occured\")", "__exit__(self, *args): self.close() def close(self): groups = self._client.groups() self._client.remove(copy.deepcopy(list(groups))) self._client.close() self._print('OPC client close", "return self._dict_opc_names_codes def _get_dict_from_opc_data(self, parameter_name, value, current_date_string): dict_param_value = {**self._dict_opc_names_codes.get(parameter_name), 'value': value, 'time':", 
"self._dict_code_keys_opc_names.get(keys_tuple) def _set_parameters_name_string(self): if self._dict_opc_names_codes: dict_codes_first_value = next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string = ','.join(list(dict_codes_first_value.keys())) + ',value,time'", "values_list = list() for k in sorted(_dict.keys()): values_list.append(_dict[k]) return tuple(values_list) def _get_opc_tag_name(self, value_dict):", "OPC names tags_settings_dicts = self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names']) self._dict_opc_names_codes = tags_settings_dicts['opc_names_and_codes'] self._set_dict_code_keys_opc_names() self._set_parameters_name_string() def __enter__(self):", "list_opc_values def _set_opc_client(self, opc_server_name): pywintypes.datetime = pywintypes.TimeType self._client = OpenOPC.client() self._client.connect(opc_server_name) self._print('connected to", "self._client.iread(group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) if self._debug or self._verbose: self._print('Data has been read", "= self._get_opc_tag_name(value_dict) list_opc_values.append((opc_tag_name, cur_value)) if self._debug or self._verbose: self._print((opc_tag_name, cur_value, cur_time)) return list_opc_values", "quality, timeRecord in self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for name, value,", "the OPC') for item in param_array: self._print(item) except OpenOPC.TimeoutError: self._print(\"OPC TimeoutError occured\") return", "self._frequency = frequency def get_frequency(self): return self._frequency @staticmethod def _get_settings_dicts(conf_settings): # TODO in", "dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])] = code_plotting_name_dict['value'] self._dict_codes_plotting_names = dict_with_tuple_keys def 
_set_dict_code_keys_opc_names(self): dict_with_tuple_keys = dict() for", "def _set_dict_codes_plotting_names(self, dict_codes_plotting_names): dict_with_tuple_keys = dict() for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict", "format='%(asctime)s %(name)s %(levelname)s:%(message)s', filename=logs_file_path) self._logger = logging.getLogger(__name__) def _set_frequency(self, conf_settings): frequency = conf_settings['frequency']", "keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict) return self._dict_code_keys_opc_names.get(keys_tuple) def _set_parameters_name_string(self): if self._dict_opc_names_codes: dict_codes_first_value = next(iter(self._dict_opc_names_codes.values())) self._parameters_name_string", "= dict() for tag_name, list_codes_plotting_names in dict_codes_plotting_names.items(): for code_plotting_name_dict in list_codes_plotting_names: dict_with_tuple_keys[(tag_name, code_plotting_name_dict['key'])]", "of tag codes and their OPC names tags_settings_dicts = self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names']) self._dict_opc_names_codes =", "self._dict_opc_names_codes def _get_dict_from_opc_data(self, parameter_name, value, current_date_string): dict_param_value = {**self._dict_opc_names_codes.get(parameter_name), 'value': value, 'time': current_date_string}", "= None self._client = None self._param_list = None self._dict_codes_plotting_names = None self._dict_opc_names_codes =", "tuple(values_list) def _get_opc_tag_name(self, value_dict): keys_tuple = self._get_sorted_tuple_values_from_dict(value_dict) return self._dict_code_keys_opc_names.get(keys_tuple) def _set_parameters_name_string(self): if self._dict_opc_names_codes:", "self._debug: logs_file_path = conf_settings['logs_file_path'] if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath( 
os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log')))", "os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log'))) debug_level_string = conf_settings['debug_level'] if debug_level_string: debug_level = logging.getLevelName(debug_level_string) else: debug_level =", "codes and their OPC names tags_settings_dicts = self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names']) self._dict_opc_names_codes = tags_settings_dicts['opc_names_and_codes'] self._set_dict_code_keys_opc_names()", "self._client.close() self._print('OPC client close the connection') def write(self, list_data_string): try: self._client.write(list_data_string) except OpenOPC.TimeoutError:", "recursive=True) # get dictionaries of tag codes and their OPC names tags_settings_dicts =", "in self._client.iread(self._param_list, group='Group0', update=1): param_array.append(self._get_dict_from_opc_data(name, value, current_date_string)) else: for name, value, quality, timeRecord", "self._frequency = 5 self._print('data refresh rate is set by default ' + str(self._frequency))", "conf_settings['logs_file_path'] if not logs_file_path: logs_file_path = os.path.abspath( os.path.realpath( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../Data/logs.log'))) debug_level_string = conf_settings['debug_level']", "opc_name self._dict_code_keys_opc_names = dict_with_tuple_keys def get_codes_plotting_names_dict(self): return self._dict_codes_plotting_names def get_opc_names_codes_dict(self): return self._dict_opc_names_codes def", "# Read 1 times the values and determine the group of opc tags,", "except OpenOPC.TimeoutError: self._print(\"Timeout error OPC occured\") def _print(self, message): if self._verbose: print(message) if", "cur_time = value_dict.pop('time', None) cur_value = value_dict.pop('value', None) opc_tag_name = self._get_opc_tag_name(value_dict) list_opc_values.append((opc_tag_name, 
cur_value))", "get dictionaries of tag codes and their OPC names tags_settings_dicts = self._get_settings_dicts(conf_settings) self._set_dict_codes_plotting_names(tags_settings_dicts['codes_and_plotting_names'])", "None self._frequency = None self._client = None self._param_list = None self._dict_codes_plotting_names = None", "= conf_settings['debug_level'] if debug_level_string: debug_level = logging.getLevelName(debug_level_string) else: debug_level = logging.DEBUG logging.basicConfig(level=debug_level, format='%(asctime)s", "if frequency is None: self._frequency = 5 self._print('data refresh rate is set by" ]
[ "<gh_stars>0 import unittest import si7021 class Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for", "os if os.name == 'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action,", "be less or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def", "or equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must be less or equal to", "== 'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max):", "raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max): value =", "param_name, min, max): value = action() self.assertGreaterEqual(value, min, f'{param_name} must be greater or", "unittest import si7021 class Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for Windows, but", "if os.name == 'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name,", "'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max): value", "self.assertLessEqual(value, max, f'{param_name} must be less or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity,", "equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must be less or equal to {max}.')", "si7021 class Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for Windows, but should check", "f'{param_name} must be less or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0,", "for embedded platform import os if os.name == 'Windows': raise OSError self._sensor =", "or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): 
self._assert_in_range(self._sensor.temperature,", "Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for Windows, but should check for embedded", "class Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for Windows, but should check for", "be greater or equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must be less or", "action, param_name, min, max): value = action() self.assertGreaterEqual(value, min, f'{param_name} must be greater", "TODO: checks for Windows, but should check for embedded platform import os if", "si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max): value = action() self.assertGreaterEqual(value, min, f'{param_name}", "Windows, but should check for embedded platform import os if os.name == 'Windows':", "# TODO: checks for Windows, but should check for embedded platform import os", "setUp(self): # TODO: checks for Windows, but should check for embedded platform import", "but should check for embedded platform import os if os.name == 'Windows': raise", "import os if os.name == 'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self,", "to {min}.') self.assertLessEqual(value, max, f'{param_name} must be less or equal to {max}.') def", "self.assertGreaterEqual(value, min, f'{param_name} must be greater or equal to {min}.') self.assertLessEqual(value, max, f'{param_name}", "{min}.') self.assertLessEqual(value, max, f'{param_name} must be less or equal to {max}.') def test_read_humidity(self):", "must be less or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80)", "os.name == 'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min,", "'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85) if __name__ == '__main__':", "check for embedded platform import os 
if os.name == 'Windows': raise OSError self._sensor", "import unittest import si7021 class Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for Windows,", "OSError self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max): value = action()", "self._sensor = si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max): value = action() self.assertGreaterEqual(value,", "min, f'{param_name} must be greater or equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must", "max, f'{param_name} must be less or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity',", "_assert_in_range(self, action, param_name, min, max): value = action() self.assertGreaterEqual(value, min, f'{param_name} must be", "action() self.assertGreaterEqual(value, min, f'{param_name} must be greater or equal to {min}.') self.assertLessEqual(value, max,", "to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10,", "less or equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self):", "self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85) if __name__ ==", "= action() self.assertGreaterEqual(value, min, f'{param_name} must be greater or equal to {min}.') self.assertLessEqual(value,", "embedded platform import os if os.name == 'Windows': raise OSError self._sensor = si7021.Si7021Sensor()", "def setUp(self): # TODO: checks for Windows, but should check for embedded platform", "def _assert_in_range(self, action, param_name, min, max): value = action() self.assertGreaterEqual(value, min, f'{param_name} must", "= 
si7021.Si7021Sensor() def _assert_in_range(self, action, param_name, min, max): value = action() self.assertGreaterEqual(value, min,", "platform import os if os.name == 'Windows': raise OSError self._sensor = si7021.Si7021Sensor() def", "import si7021 class Si7021SensorTests(unittest.TestCase): def setUp(self): # TODO: checks for Windows, but should", "equal to {max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature',", "f'{param_name} must be greater or equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must be", "min, max): value = action() self.assertGreaterEqual(value, min, f'{param_name} must be greater or equal", "{max}.') def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85)", "0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85) if __name__ == '__main__': unittest.main()", "def test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85) if", "checks for Windows, but should check for embedded platform import os if os.name", "value = action() self.assertGreaterEqual(value, min, f'{param_name} must be greater or equal to {min}.')", "should check for embedded platform import os if os.name == 'Windows': raise OSError", "must be greater or equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must be less", "for Windows, but should check for embedded platform import os if os.name ==", "greater or equal to {min}.') self.assertLessEqual(value, max, f'{param_name} must be less or equal", "max): value = action() self.assertGreaterEqual(value, min, f'{param_name} must be 
greater or equal to", "test_read_humidity(self): self._assert_in_range(self._sensor.humidity, 'Humidity', 0, 80) def test_read_temperature(self): self._assert_in_range(self._sensor.temperature, 'Temperature', -10, 85) if __name__" ]
[ "import * logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs):", "get(url + '/user', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def", "+ '/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='',", "# noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/'", "token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/' + token_name, headers={'Authorization': \"Bearer \" +", "get(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def", "logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return get(url", "+ key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url", "# noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization':", "key='', timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json()", "+ '/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal", "PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/' + token_name,", "logging from wrapper import * logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='',", "\" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return", "PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return get(url 
+ '/user', headers={'Authorization': \"Bearer \"", "**kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection", "key='', timeout=60, **kwargs): return get(url + '/user', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json()", "# noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return get(url + '/user', headers={'Authorization':", "noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer", "key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return post(url", "# noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return post(url + '/user/tokens',", "noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization':", "\"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs):", "key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url +", "'/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='',", "'/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def", "noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/' +", "**kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name': token_name}, timeout=timeout).json()", "def create_user_token(url='', key='', token_name='', 
timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer \"", "PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer", "post(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name': token_name}, timeout=timeout).json() # noinspection", "\" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs):", "timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url +", "token_name='', timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name':", "PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer \"", "'/user', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='',", "get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key},", "+ key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return", "timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name': token_name},", "key}, data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs):", "* logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return", "noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return get(url + '/user', headers={'Authorization': \"Bearer", "= 
logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return get(url +", "get_user(url='', key='', timeout=60, **kwargs): return get(url + '/user', headers={'Authorization': \"Bearer \" + key},", "headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='',", "create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer \" +", "return get(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal", "import logging from wrapper import * logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def", "token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url", "def get_user(url='', key='', timeout=60, **kwargs): return get(url + '/user', headers={'Authorization': \"Bearer \" +", "+ key}, data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60,", "**kwargs): return get(url + '/user', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection", "logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='', timeout=60, **kwargs): return get(url + '/user',", "data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return", "def delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/' + token_name, headers={'Authorization':", "headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60,", "\"Bearer \" + key}, 
data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='',", "\" + key}, data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='', key='', token_name='',", "delete_user_token(url='', key='', token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/' + token_name, headers={'Authorization': \"Bearer", "+ '/user', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='',", "timeout=timeout).json() # noinspection PyUnusedLocal def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url + '/user/tokens',", "timeout=60, **kwargs): return delete(url + '/user/tokens/' + token_name, headers={'Authorization': \"Bearer \" + key},", "headers={'Authorization': \"Bearer \" + key}, data={'name': token_name}, timeout=timeout).json() # noinspection PyUnusedLocal def delete_user_token(url='',", "wrapper import * logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='', timeout=60,", "\"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60,", "def get_user_tokens(url='', key='', timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer \" +", "timeout=60, **kwargs): return get(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() #", "key='', token_name='', timeout=60, **kwargs): return post(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key},", "timeout=timeout).json() # noinspection PyUnusedLocal def create_user_token(url='', key='', token_name='', timeout=60, **kwargs): return post(url +", "return post(url + '/user/tokens', headers={'Authorization': \"Bearer \" + key}, data={'name': token_name}, timeout=timeout).json() #", "timeout=60, **kwargs): return get(url + '/user', 
headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() #", "key='', token_name='', timeout=60, **kwargs): return delete(url + '/user/tokens/' + token_name, headers={'Authorization': \"Bearer \"", "**kwargs): return delete(url + '/user/tokens/' + token_name, headers={'Authorization': \"Bearer \" + key}, timeout=timeout)", "from wrapper import * logger = logging.getLogger(__name__) # noinspection PyUnusedLocal def get_user(url='', key='',", "return get(url + '/user', headers={'Authorization': \"Bearer \" + key}, timeout=timeout).json() # noinspection PyUnusedLocal" ]
[ "except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that", "None) manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options) return lwr_client", "def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj,", "get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self,", "self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data.", "a (this) object store - just passing them along to the LWR unmodified.", "obj.id return kwds pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\",", "from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates to", "obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) #", "files returned by a (this) object store - just passing them along to", "= config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport)", "self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds):", "import ObjectStoreClientManager except ImportError: from 
lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store", "def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self,", "**kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None", "**kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def", "Galaxy at some future point or significantly modified. \"\"\" def __init__(self, config, config_xml):", "return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj,", "then allow Galaxy to fully manage jobs on remote servers with completely different", "absolute_import # Need to import lwr_client absolutely. from ..objectstore import ObjectStore try: from", "**kwds): kwds['object_id'] = obj.id return kwds pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\")", "**kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO:", "job destinations would then allow Galaxy to fully manage jobs on remote servers", "obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. 
def get_data(self, obj, **kwds):", "__build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return kwds pass def __build_lwr_client(self, config_xml): url", "be more aspirational than practical for now, it would be good to Galaxy", "beta and may be dropped from Galaxy at some future point or significantly", "LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates to a remote LWR server. This", "along to the LWR unmodified. That modification - along with this implementation and", "LWR unmodified. That modification - along with this implementation and LWR job destinations", "Object store implementation that delegates to a remote LWR server. This may be", "at some future point or significantly modified. \"\"\" def __init__(self, config, config_xml): self.lwr_client", "**kwds)) # TODO: Optimize get_data. def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def", "return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return", "Galaxy to a point that a handler thread could be setup that doesn't", "return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None):", "exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds))", "get_data. 
def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return", "config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token)", "be good to Galaxy to a point that a handler thread could be", "size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds))", "allow Galaxy to fully manage jobs on remote servers with completely different mount", "self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds):", "import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager", "**kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return", "**kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return", "LWR server. This may be more aspirational than practical for now, it would", "just passing them along to the LWR unmodified. 
That modification - along with", "**kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return", "self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return", "to fully manage jobs on remote servers with completely different mount points. This", "__future__ import absolute_import # Need to import lwr_client absolutely. from ..objectstore import ObjectStore", "empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds))", "obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def", "**kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. 
def get_data(self, obj, **kwds): return", "kwds['object_id'] = obj.id return kwds pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token", "return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj,", "None def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return kwds pass def __build_lwr_client(self,", "to a point that a handler thread could be setup that doesn't attempt", "return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj,", "thread could be setup that doesn't attempt to access the disk files returned", "dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options) return lwr_client def shutdown(self): pass", "ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates", "manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options) return lwr_client def", "self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds):", "the LWR unmodified. 
That modification - along with this implementation and LWR job", "may be more aspirational than practical for now, it would be good to", "Galaxy to fully manage jobs on remote servers with completely different mount points.", "ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation", "be setup that doesn't attempt to access the disk files returned by a", "This may be more aspirational than practical for now, it would be good", "some future point or significantly modified. \"\"\" def __init__(self, config, config_xml): self.lwr_client =", "modified. \"\"\" def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds):", "class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates to a remote LWR server.", "obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def", "that delegates to a remote LWR server. This may be more aspirational than", "def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj,", "obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None,", "LWR job destinations would then allow Galaxy to fully manage jobs on remote", "future point or significantly modified. 
\"\"\" def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml)", "modification - along with this implementation and LWR job destinations would then allow", "and LWR job destinations would then allow Galaxy to fully manage jobs on", "config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options", "That modification - along with this implementation and LWR job destinations would then", "self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj, **kwds):", "a point that a handler thread could be setup that doesn't attempt to", "return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize", "a handler thread could be setup that doesn't attempt to access the disk", "def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj,", "**kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self,", "server. This may be more aspirational than practical for now, it would be", "doesn't attempt to access the disk files returned by a (this) object store", "This implementation should be considered beta and may be dropped from Galaxy at", "return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj,", "that a handler thread could be setup that doesn't attempt to access the", "Need to import lwr_client absolutely. 
from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import", "than practical for now, it would be good to Galaxy to a point", "be considered beta and may be dropped from Galaxy at some future point", "def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return kwds pass def __build_lwr_client(self, config_xml):", "**kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self,", "that doesn't attempt to access the disk files returned by a (this) object", "self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return", "self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent()", "extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return", "from Galaxy at some future point or significantly modified. \"\"\" def __init__(self, config,", "# Need to import lwr_client absolutely. from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager", "self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds))", "update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj,", "points. 
This implementation should be considered beta and may be dropped from Galaxy", "def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj, **kwds): kwds['object_id']", "should be considered beta and may be dropped from Galaxy at some future", "import absolute_import # Need to import lwr_client absolutely. from ..objectstore import ObjectStore try:", "return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj,", "from __future__ import absolute_import # Need to import lwr_client absolutely. from ..objectstore import", "for now, it would be good to Galaxy to a point that a", "self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds):", "ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates to a remote LWR", "to a remote LWR server. This may be more aspirational than practical for", "= dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options) return lwr_client def shutdown(self):", "file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds))", "**kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return", "# TODO: Optimize get_data. 
def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self,", "obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id", "**kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self,", "access the disk files returned by a (this) object store - just passing", "significantly modified. \"\"\" def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj,", "def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj,", "from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager", "by a (this) object store - just passing them along to the LWR", "**kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return", "lwr_client absolutely. from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError:", "Optimize get_data. def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds):", "import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates to a remote", "TODO: Optimize get_data. 
def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj,", "along with this implementation and LWR job destinations would then allow Galaxy to", "point or significantly modified. \"\"\" def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def", "completely different mount points. This implementation should be considered beta and may be", "return kwds pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None)", "manage jobs on remote servers with completely different mount points. This implementation should", "implementation should be considered beta and may be dropped from Galaxy at some", "self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds):", "unmodified. That modification - along with this implementation and LWR job destinations would", "def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def", "obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def", "\"\"\" def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return", "object store - just passing them along to the LWR unmodified. 
That modification", "obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def", "try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore):", "handler thread could be setup that doesn't attempt to access the disk files", "store implementation that delegates to a remote LWR server. This may be more", "jobs on remote servers with completely different mount points. This implementation should be", "def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\",", "private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options =", "return None def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return kwds pass def", "the disk files returned by a (this) object store - just passing them", "on remote servers with completely different mount points. This implementation should be considered", "config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def", "absolutely. from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from", "ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class", "servers with completely different mount points. This implementation should be considered beta and", "implementation that delegates to a remote LWR server. 
This may be more aspirational", "may be dropped from Galaxy at some future point or significantly modified. \"\"\"", "or significantly modified. \"\"\" def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self,", "them along to the LWR unmodified. That modification - along with this implementation", "to access the disk files returned by a (this) object store - just", "disk files returned by a (this) object store - just passing them along", "__build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None)", "remote servers with completely different mount points. This implementation should be considered beta", "remote LWR server. This may be more aspirational than practical for now, it", "mount points. This implementation should be considered beta and may be dropped from", "more aspirational than practical for now, it would be good to Galaxy to", "a remote LWR server. This may be more aspirational than practical for now,", "delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. 
def get_data(self, obj,", "returned by a (this) object store - just passing them along to the", "obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def", "extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return kwds", "now, it would be good to Galaxy to a point that a handler", "transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client =", "def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj,", "this implementation and LWR job destinations would then allow Galaxy to fully manage", "None) transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client", "= self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj, **kwds):", "destinations would then allow Galaxy to fully manage jobs on remote servers with", "pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport =", "dropped from Galaxy at some future point or significantly modified. \"\"\" def __init__(self,", "**kwds)) def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. def", "return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj,", "to the LWR unmodified. 
That modification - along with this implementation and LWR", "galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object", "with completely different mount points. This implementation should be considered beta and may", "attempt to access the disk files returned by a (this) object store -", "**kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj, **kwds)) def delete(self,", "be dropped from Galaxy at some future point or significantly modified. \"\"\" def", "and may be dropped from Galaxy at some future point or significantly modified.", "config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options", "def __init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj,", "self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self, obj,", "with this implementation and LWR job destinations would then allow Galaxy to fully", "kwds pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport", "delegates to a remote LWR server. This may be more aspirational than practical", "passing them along to the LWR unmodified. 
That modification - along with this", "could be setup that doesn't attempt to access the disk files returned by", "considered beta and may be dropped from Galaxy at some future point or", "config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds)) def file_ready(self,", "fully manage jobs on remote servers with completely different mount points. This implementation", "return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. def get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj,", "..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import", "alt_name=None): return None def __build_kwds(self, obj, **kwds): kwds['object_id'] = obj.id return kwds pass", "good to Galaxy to a point that a handler thread could be setup", "create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj, **kwds)) def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds))", "would then allow Galaxy to fully manage jobs on remote servers with completely", "obj, **kwds): kwds['object_id'] = obj.id return kwds pass def __build_lwr_client(self, config_xml): url =", "to Galaxy to a point that a handler thread could be setup that", "url = config_xml.get(\"url\") private_token = config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options =", "\"\"\" Object store implementation that delegates to a remote LWR server. 
This may", "def file_ready(self, obj, **kwds): return self.lwr_client.file_ready(**self.__build_kwds(obj, **kwds)) def create(self, obj, **kwds): return self.lwr_client.create(**self.__build_kwds(obj,", "implementation and LWR job destinations would then allow Galaxy to fully manage jobs", "config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options) return", "from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except ImportError: from lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\"", "__init__(self, config, config_xml): self.lwr_client = self.__build_lwr_client(config_xml) def exists(self, obj, **kwds): return self.lwr_client.exists(**self.__build_kwds(obj, **kwds))", "lwr.lwr_client.manager import ObjectStoreClientManager class LwrObjectStore(ObjectStore): \"\"\" Object store implementation that delegates to a", "= obj.id return kwds pass def __build_lwr_client(self, config_xml): url = config_xml.get(\"url\") private_token =", "get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds))", "= config_xml.get(\"private_token\", None) transport = config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options = dict(url=url,", "it would be good to Galaxy to a point that a handler thread", "get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False, alt_name=None): return None def __build_kwds(self, obj, **kwds): kwds['object_id'] =", "setup that doesn't attempt to access the disk files returned by a (this)", "aspirational than practical for now, it would be good to Galaxy to a", "practical for now, it would be good to Galaxy to a point that", 
"store - just passing them along to the LWR unmodified. That modification -", "**kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self,", "obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def", "- just passing them along to the LWR unmodified. That modification - along", "(this) object store - just passing them along to the LWR unmodified. That", "to import lwr_client absolutely. from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager", "**kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds)) def update_from_file(self, obj, **kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self):", "would be good to Galaxy to a point that a handler thread could", "get_data(self, obj, **kwds): return self.lwr_client.get_data(**self.__build_kwds(obj, **kwds)) def get_filename(self, obj, **kwds): return self.lwr_client.get_filename(**self.__build_kwds(obj, **kwds))", "def delete(self, obj, **kwds): return self.lwr_client.delete(**self.__build_kwds(obj, **kwds)) # TODO: Optimize get_data. def get_data(self,", "**kwds): return self.lwr_client.update_from_file(**self.__build_kwds(obj, **kwds)) def get_store_usage_percent(self): return self.lwr_client.get_store_usage_percent() def get_object_url(self, obj, extra_dir=None, extra_dir_at_root=False,", "import lwr_client absolutely. 
from ..objectstore import ObjectStore try: from galaxy.jobs.runners.lwr_client.manager import ObjectStoreClientManager except", "- along with this implementation and LWR job destinations would then allow Galaxy", "def empty(self, obj, **kwds): return self.lwr_client.empty(**self.__build_kwds(obj, **kwds)) def size(self, obj, **kwds): return self.lwr_client.size(**self.__build_kwds(obj,", "point that a handler thread could be setup that doesn't attempt to access", "different mount points. This implementation should be considered beta and may be dropped", "= config_xml.get(\"transport\", None) manager_options = dict(transport=transport) client_options = dict(url=url, private_token=private_token) lwr_client = ObjectStoreClientManager(**manager_options).get_client(client_options)" ]
[ "s, t): dic1, dic2 = [0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')] +=", "s, t): dic1, dic2 = {}, {} for item in s: dic1[item] =", "= dic2.get(item, 0) + 1 return dic1 == dic2 def isAnagram2(self, s, t):", "for item in s: dic1[ord(item)-ord('a')] += 1 for item in t: dic2[ord(item)-ord('a')] +=", "dic1 == dic2 def isAnagram2(self, s, t): dic1, dic2 = [0]*26, [0]*26 for", "return dic1 == dic2 def isAnagram2(self, s, t): dic1, dic2 = [0]*26, [0]*26", "in s: dic1[item] = dic1.get(item, 0) + 1 for item in t: dic2[item]", "+ 1 for item in t: dic2[item] = dic2.get(item, 0) + 1 return", "isAnagram1(self, s, t): dic1, dic2 = {}, {} for item in s: dic1[item]", "item in t: dic2[item] = dic2.get(item, 0) + 1 return dic1 == dic2", "+ 1 return dic1 == dic2 def isAnagram2(self, s, t): dic1, dic2 =", "in t: dic2[item] = dic2.get(item, 0) + 1 return dic1 == dic2 def", "s: dic1[item] = dic1.get(item, 0) + 1 for item in t: dic2[item] =", "t): dic1, dic2 = {}, {} for item in s: dic1[item] = dic1.get(item,", "t: dic2[item] = dic2.get(item, 0) + 1 return dic1 == dic2 def isAnagram2(self,", "{}, {} for item in s: dic1[item] = dic1.get(item, 0) + 1 for", "def isAnagram1(self, s, t): dic1, dic2 = {}, {} for item in s:", "dic2[item] = dic2.get(item, 0) + 1 return dic1 == dic2 def isAnagram2(self, s,", "1 return dic1 == dic2 def isAnagram2(self, s, t): dic1, dic2 = [0]*26,", "isAnagram2(self, s, t): dic1, dic2 = [0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')]", "1 for item in t: dic2[ord(item)-ord('a')] += 1 return dic1 == dic2 def", "for item in t: dic2[ord(item)-ord('a')] += 1 return dic1 == dic2 def isAnagram3(self,", "+= 1 return dic1 == dic2 def isAnagram3(self, s, t): return sorted(s) ==", "dic1, dic2 = {}, {} for item in s: dic1[item] = dic1.get(item, 0)", "#https://leetcode.com/problems/valid-anagram/ #https://leetcode.com/problems/valid-anagram/discuss/66499/Python-solutions-(sort-and-dictionary). 
def isAnagram1(self, s, t): dic1, dic2 = {}, {} for item", "= dic1.get(item, 0) + 1 for item in t: dic2[item] = dic2.get(item, 0)", "1 for item in t: dic2[item] = dic2.get(item, 0) + 1 return dic1", "0) + 1 return dic1 == dic2 def isAnagram2(self, s, t): dic1, dic2", "#https://leetcode.com/problems/valid-anagram/discuss/66499/Python-solutions-(sort-and-dictionary). def isAnagram1(self, s, t): dic1, dic2 = {}, {} for item in", "dic2 = [0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')] += 1 for item", "= {}, {} for item in s: dic1[item] = dic1.get(item, 0) + 1", "def isAnagram2(self, s, t): dic1, dic2 = [0]*26, [0]*26 for item in s:", "[0]*26 for item in s: dic1[ord(item)-ord('a')] += 1 for item in t: dic2[ord(item)-ord('a')]", "t: dic2[ord(item)-ord('a')] += 1 return dic1 == dic2 def isAnagram3(self, s, t): return", "in t: dic2[ord(item)-ord('a')] += 1 return dic1 == dic2 def isAnagram3(self, s, t):", "[0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')] += 1 for item in t:", "dic2 = {}, {} for item in s: dic1[item] = dic1.get(item, 0) +", "item in s: dic1[item] = dic1.get(item, 0) + 1 for item in t:", "1 return dic1 == dic2 def isAnagram3(self, s, t): return sorted(s) == sorted(t)", "dic1[item] = dic1.get(item, 0) + 1 for item in t: dic2[item] = dic2.get(item,", "dic2.get(item, 0) + 1 return dic1 == dic2 def isAnagram2(self, s, t): dic1,", "+= 1 for item in t: dic2[ord(item)-ord('a')] += 1 return dic1 == dic2", "{} for item in s: dic1[item] = dic1.get(item, 0) + 1 for item", "for item in t: dic2[item] = dic2.get(item, 0) + 1 return dic1 ==", "0) + 1 for item in t: dic2[item] = dic2.get(item, 0) + 1", "dic2 def isAnagram2(self, s, t): dic1, dic2 = [0]*26, [0]*26 for item in", "dic1[ord(item)-ord('a')] += 1 for item in t: dic2[ord(item)-ord('a')] += 1 return dic1 ==", "= [0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')] += 1 for item in", "item in s: dic1[ord(item)-ord('a')] += 1 for item in t: dic2[ord(item)-ord('a')] += 1", "in s: 
dic1[ord(item)-ord('a')] += 1 for item in t: dic2[ord(item)-ord('a')] += 1 return", "dic1.get(item, 0) + 1 for item in t: dic2[item] = dic2.get(item, 0) +", "== dic2 def isAnagram2(self, s, t): dic1, dic2 = [0]*26, [0]*26 for item", "dic2[ord(item)-ord('a')] += 1 return dic1 == dic2 def isAnagram3(self, s, t): return sorted(s)", "item in t: dic2[ord(item)-ord('a')] += 1 return dic1 == dic2 def isAnagram3(self, s,", "s: dic1[ord(item)-ord('a')] += 1 for item in t: dic2[ord(item)-ord('a')] += 1 return dic1", "t): dic1, dic2 = [0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')] += 1", "for item in s: dic1[item] = dic1.get(item, 0) + 1 for item in", "dic1, dic2 = [0]*26, [0]*26 for item in s: dic1[ord(item)-ord('a')] += 1 for" ]
[ "on Geophysics, 2nd ed., Vol. 10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars", "Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of the", "_Constant( abbrev='density_mars', name='Mean density of Mars', value=3 * mass_mars.value / (_np.pi * 4", "value, unit, uncertainty, and reference. \"\"\" from __future__ import absolute_import as _absolute_import from", "* gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.') omega_mars", "reference='Derived from gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular spin rate of", "60), unit='rad / s', uncertainty=0.000000003 * 2 * _np.pi / 360 / (24", "unit='rad / s', uncertainty=0.000000003 * 2 * _np.pi / 360 / (24 *", "the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). '", "Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A", "274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of the Mars", "and ' 'topography of the terrestrial planets. In <NAME> & <NAME> ' '(Eds.),", "* 3 * mass_mars.value * r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2 ),", "of the reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME>", "r_mars = _Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek,", "reference='<NAME>., <NAME>, <NAME> (2016). 
' 'An improved JPL Mars gravity field and orientation", "as _np from astropy.constants import Constant as _Constant from astropy.constants import G as", "106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of the Mars reference", "<NAME> ' '(Eds.), Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193). '", "name, value, unit, uncertainty, and reference. \"\"\" from __future__ import absolute_import as _absolute_import", "r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular spin", "gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307", "* 60 * 60), unit='rad / s', uncertainty=0.000000003 * 2 * _np.pi /", "uncertainty=0.000000003 * 2 * _np.pi / 360 / (24 * 60 * 60),", "rate of Mars', value=350.891985307 * 2 * _np.pi / 360 / (24 *", "(_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.') g0_mars =", "'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2", "tides', value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 *", "360 / (24 * 60 * 60), unit='rad / s', uncertainty=0.000000003 * 2", "_Constant( abbrev='b_mars', name='Semiminor axis of the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>,", "object is an astropy Constant that possesses the attributes name, value, unit, uncertainty,", "import division as _division from __future__ import print_function as _print_function import numpy as", "_absolute_import from __future__ import division as _division from __future__ import print_function as _print_function", "as _Constant from astropy.constants import G as _G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational", "G as _G gm_mars = 
_Constant( abbrev='gm_mars', name='Gravitational constant times the mass of", "import Constant as _Constant from astropy.constants import G as _G gm_mars = _Constant(", "reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). '", "s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ),", "' 'A new reference equipotential surface, and reference ellipsoid for ' 'the planet", "value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2", "' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant(", "/ m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2 + (3", "Mars at mean planetary radius, ' 'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2,", "planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant(", "from __future__ import division as _division from __future__ import print_function as _print_function import", "+ b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010). 
' 'A new", "+ (gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars and G.') r_mars", "abbrev='b_mars', name='Semiminor axis of the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>,", "__future__ import absolute_import as _absolute_import from __future__ import division as _division from __future__", "_np.pi / 360 / (24 * 60 * 60), reference='<NAME>., <NAME>, <NAME> (2016).", "gravity of Mars at mean planetary radius, ' 'ignoring rotation and tides', value=gm_mars.value", "as _print_function import numpy as _np from astropy.constants import Constant as _Constant from", "_G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived", "= _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2", "), reference='Derived from gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular spin rate", "as _absolute_import from __future__ import division as _division from __future__ import print_function as", "106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of the Mars", "/ _G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars and", "constant times the mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>,", "* 4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi *", "' 'the planet Mars. 
Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars", "name='Semiminor axis of the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and", "+ (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010). '", "/ a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 +", "unit, uncertainty, and reference. \"\"\" from __future__ import absolute_import as _absolute_import from __future__", "r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars", "density_mars = _Constant( abbrev='density_mars', name='Mean density of Mars', value=3 * mass_mars.value / (_np.pi", "ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference", "_Constant from astropy.constants import G as _G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant", "'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of the Mars reference ellipsoid', value=3395428.0,", "value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential", "name='Gravitational constant times the mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>.,", "import absolute_import as _absolute_import from __future__ import division as _division from __future__ import", "b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010). 
' 'A new reference", "orbiter ' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars =", "orientation from Mars orbiter ' 'and lander tracking data, Icarus, 274, 253-260, '", "from __future__ import absolute_import as _absolute_import from __future__ import division as _division from", "'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars',", "_Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 +", "and <NAME> (2010). ' 'A new reference equipotential surface, and reference ellipsoid for", "planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant(", "from gm_mars and G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3,", "import G as _G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant times the mass", "reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). 
' 'A new", "G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600:", "as _division from __future__ import print_function as _print_function import numpy as _np from", "* r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean", "Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential", "* mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2 + (3 * 3 *", "of the Mars reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty *", "a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010).", "' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant(", "of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty", "omega_mars = _Constant( abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307 * 2 *", "<filename>pyshtools/constant/Mars.py \"\"\" pyshtools constants for the planet Mars. Each object is an astropy", "* 60), unit='rad / s', uncertainty=0.000000003 * 2 * _np.pi / 360 /", "from mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity of Mars", "= _Constant( abbrev='b_mars', name='Semiminor axis of the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0,", "'An improved JPL Mars gravity field and orientation from Mars orbiter ' 'and", "_Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015).", "60), reference='<NAME>., <NAME>, <NAME> (2016). 
' 'An improved JPL Mars gravity field and", "reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new", "'topography of the terrestrial planets. In <NAME> & <NAME> ' '(Eds.), Treatise on", "' 'the planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars", "), reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface, and", "s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL Mars gravity field", "equipotential surface, and reference ellipsoid for ' 'the planet Mars. Earth, Moon, and", "unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived from", "tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of", "(2010). ' 'A new reference equipotential surface, and reference ellipsoid for ' 'the", "* 60), reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL Mars gravity field", "Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal", "/ _G.value**2)**2 ), reference='Derived from gm_mars and G.') r_mars = _Constant( abbrev='r_mars', name='Mean", "name='Angular spin rate of Mars', value=350.891985307 * 2 * _np.pi / 360 /", "Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density of Mars', value=3 * mass_mars.value", "reference equipotential surface, and reference ellipsoid for ' 'the planet Mars. 
Earth, Moon,", "mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity of Mars at", "/ (_np.pi * 4 * r_mars.value**3))**2 + (3 * 3 * mass_mars.value *", "r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty", "lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor", "360 / (24 * 60 * 60), reference='<NAME>., <NAME>, <NAME> (2016). ' 'An", "uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived", "'the planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars =", "_G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars and G.') r_mars = _Constant( abbrev='r_mars',", "ellipsoid for ' 'the planet Mars. Earth, Moon, and Planets, 106, 1-13, '", "/ r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular", "(24 * 60 * 60), reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL", "name='Mean density of Mars', value=3 * mass_mars.value / (_np.pi * 4 * r_mars.value**3),", "of the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010).", "274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value /", "'the planet Mars. 
Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars =", "and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of", "'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density of Mars', value=3 *", "and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity", "* r_mars.value**3))**2 + (3 * 3 * mass_mars.value * r_mars.uncertainty / (_np.pi *", "unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference", "spin rate of Mars', value=350.891985307 * 2 * _np.pi / 360 / (24", "_np from astropy.constants import Constant as _Constant from astropy.constants import G as _G", "Vol. 10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean", "radius, ' 'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty", "planet Mars. Each object is an astropy Constant that possesses the attributes name,", "4 * r_mars.value**3))**2 + (3 * 3 * mass_mars.value * r_mars.uncertainty / (_np.pi", "' 'topography of the terrestrial planets. In <NAME> & <NAME> ' '(Eds.), Treatise", "b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2", "= _Constant( abbrev='gm_mars', name='Gravitational constant times the mass of Mars', value=0.4282837581575610e+14, unit='m3 /", "10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density", "and reference. 
\"\"\" from __future__ import absolute_import as _absolute_import from __future__ import division", "and orientation from Mars orbiter ' 'and lander tracking data, Icarus, 274, 253-260,", "Constant as _Constant from astropy.constants import G as _G gm_mars = _Constant( abbrev='gm_mars',", "Mars', value=350.891985307 * 2 * _np.pi / 360 / (24 * 60 *", "a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of the Mars reference ellipsoid', value=3395428.0, unit='m',", "r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2", "'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of the reference ellipsoid',", "and G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0,", "' '(Eds.), Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193). ' 'Oxford,", "pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density of", "Wieczorek, <NAME>. (2015). Gravity and ' 'topography of the terrestrial planets. In <NAME>", "* 2 * _np.pi / 360 / (24 * 60 * 60), unit='rad", "* r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived from mass_mars and", "(2016). ' 'An improved JPL Mars gravity field and orientation from Mars orbiter", "the Mars reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value", "153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density of Mars',", "'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of the Mars reference ellipsoid', value=(a_mars.value -", "Geophysics, 2nd ed., Vol. 10, pp. 153-193). 
' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars =", "/ 360 / (24 * 60 * 60), unit='rad / s', uncertainty=0.000000003 *", "Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved", "mean planetary radius, ' 'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m /", "60 * 60), reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL Mars gravity", "_G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant times the mass of Mars', value=0.4282837581575610e+14,", "import numpy as _np from astropy.constants import Constant as _Constant from astropy.constants import", "from astropy.constants import G as _G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant times", "Mars. Each object is an astropy Constant that possesses the attributes name, value,", "the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). '", "<NAME>, <NAME> (2016). 
' 'An improved JPL Mars gravity field and orientation from", "print_function as _print_function import numpy as _np from astropy.constants import Constant as _Constant", "(_np.pi * 4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi", "and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of the", "4 * r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars',", "uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2", "(_np.pi * 4 * r_mars.value**3))**2 + (3 * 3 * mass_mars.value * r_mars.uncertainty", "_print_function import numpy as _np from astropy.constants import Constant as _Constant from astropy.constants", "s', uncertainty=0.000000003 * 2 * _np.pi / 360 / (24 * 60 *", "abbrev='f_mars', name='Flattening of the Mars reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='',", "unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and ' 'topography of the terrestrial", "Mars orbiter ' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars", "<NAME> (2010). ' 'A new reference equipotential surface, and reference ellipsoid for '", "uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL Mars gravity field and", "unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL Mars", "Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical", "<NAME> & <NAME> ' '(Eds.), Treatise on Geophysics, 2nd ed., Vol. 
10, pp.", "mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty /", "a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2)", "b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of the Mars reference ellipsoid', value=3377678.0, unit='m',", "'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty", "* _G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars and G.') r_mars = _Constant(", "m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2 + (3 *", "gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant times the mass of Mars', value=0.4282837581575610e+14, unit='m3", "Gravity and ' 'topography of the terrestrial planets. In <NAME> & <NAME> '", "JPL Mars gravity field and orientation from Mars orbiter ' 'and lander tracking", "* r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.') omega_mars = _Constant(", "'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of the Mars reference ellipsoid', value=3377678.0,", "(gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars and G.') r_mars =", "r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.')", "_Constant( abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307 * 2 * _np.pi /", "Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and ' 'topography of", "that possesses the attributes name, value, unit, uncertainty, and reference. 
\"\"\" from __future__", "/ r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value *", "/ s', uncertainty=0.000000003 * 2 * _np.pi / 360 / (24 * 60", "Mars orbiter ' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars", "the attributes name, value, unit, uncertainty, and reference. \"\"\" from __future__ import absolute_import", "absolute_import as _absolute_import from __future__ import division as _division from __future__ import print_function", "density of Mars', value=3 * mass_mars.value / (_np.pi * 4 * r_mars.value**3), unit='kg", "astropy.constants import Constant as _Constant from astropy.constants import G as _G gm_mars =", "+ (3 * 3 * mass_mars.value * r_mars.uncertainty / (_np.pi * 4 *", "= _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of the reference ellipsoid', value=12654875.0, unit='m2", "2 * _np.pi / 360 / (24 * 60 * 60), unit='rad /", "Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of the", "(a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>,", "1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of the", "3 * mass_mars.value * r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived", "Mars', value=3 * mass_mars.value / (_np.pi * 4 * r_mars.value**3), unit='kg / m3',", "= _Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>.", "of the terrestrial planets. 
In <NAME> & <NAME> ' '(Eds.), Treatise on Geophysics,", "as _G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant times the mass of Mars',", "* r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4 *", "of the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010).", "times the mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME>", "* (a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ),", "planetary radius, ' 'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m / s2',", "name='Semimajor axis of the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and", "4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4", "ed., Vol. 10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars',", "for ' 'the planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.')", "reference ellipsoid for ' 'the planet Mars. Earth, Moon, and Planets, 106, 1-13,", "_G.value**2)**2 ), reference='Derived from gm_mars and G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius", "'the planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars =", "normal gravity potential of the reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>,", "Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A", "(2015). Gravity and ' 'topography of the terrestrial planets. In <NAME> & <NAME>", "for the planet Mars. 
Each object is an astropy Constant that possesses the", "division as _division from __future__ import print_function as _print_function import numpy as _np", "= _Constant( abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307 * 2 * _np.pi", "/ (24 * 60 * 60), reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved", "- b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>,", "Constant that possesses the attributes name, value, unit, uncertainty, and reference. \"\"\" from", "r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity of Mars at mean planetary", "orbiter ' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars =", "planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant(", "f_mars = _Constant( abbrev='f_mars', name='Flattening of the Mars reference ellipsoid', value=(a_mars.value - b_mars.value)", "abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307 * 2 * _np.pi / 360", "* _np.pi / 360 / (24 * 60 * 60), reference='<NAME>., <NAME>, <NAME>", "axis of the Mars reference ellipsoid', value=3377678.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME>", "Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.')", "/ s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2", "planets. In <NAME> & <NAME> ' '(Eds.), Treatise on Geophysics, 2nd ed., Vol.", "improved JPL Mars gravity field and orientation from Mars orbiter ' 'and lander", "unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2 +", "the planet Mars. 
Each object is an astropy Constant that possesses the attributes", "_G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars and G.')", "' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of the reference", "r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307 * 2", "/ (24 * 60 * 60), unit='rad / s', uncertainty=0.000000003 * 2 *", "axis of the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME>", "' 'the planet Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars", "), reference='Derived from mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity", "/ (_np.pi * 4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty /", "abbrev='gm_mars', name='Gravitational constant times the mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6,", "value=350.891985307 * 2 * _np.pi / 360 / (24 * 60 * 60),", "value=3 * mass_mars.value / (_np.pi * 4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3", "abbrev='a_mars', name='Semimajor axis of the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>,", "* mass_mars.value / (_np.pi * 4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 *", "uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface, and", "attributes name, value, unit, uncertainty, and reference. \"\"\" from __future__ import absolute_import as", "data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars',", "uncertainty, and reference. 
\"\"\" from __future__ import absolute_import as _absolute_import from __future__ import", "Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor", "unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty /", "value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new", "253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value,", "* 4 * r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.') g0_mars = _Constant(", "rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 +", "g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity of Mars at mean planetary radius,", "possesses the attributes name, value, unit, uncertainty, and reference. 
\"\"\" from __future__ import", "reference='Derived from mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity of", "_Constant( abbrev='gm_mars', name='Gravitational constant times the mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2',", "the reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010).", "Each object is an astropy Constant that possesses the attributes name, value, unit,", "of Mars at mean planetary radius, ' 'ignoring rotation and tides', value=gm_mars.value /", "Mars reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value -", "u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of the reference ellipsoid', value=12654875.0,", "_Constant( abbrev='f_mars', name='Flattening of the Mars reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value,", "60 * 60), unit='rad / s', uncertainty=0.000000003 * 2 * _np.pi / 360", "data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of", "value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2 * gm_mars.value", "field and orientation from Mars orbiter ' 'and lander tracking data, Icarus, 274,", "' 'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty /", "/ a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME>", "_Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of the reference ellipsoid', value=12654875.0, unit='m2 /", "Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of the Mars", "and r_mars.') omega_mars = 
_Constant( abbrev='omega_mars', name='Angular spin rate of Mars', value=350.891985307 *", "and tides', value=gm_mars.value / r_mars.value**2, unit='m / s2', uncertainty=_np.sqrt((gm_mars.uncertainty / r_mars.value**2)**2 + (2", "mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). '", "/ 360 / (24 * 60 * 60), reference='<NAME>., <NAME>, <NAME> (2016). '", "terrestrial planets. In <NAME> & <NAME> ' '(Eds.), Treatise on Geophysics, 2nd ed.,", "a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface,", "Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening", "reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and ' 'topography of the terrestrial planets. In", "'(Eds.), Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon,", "the mass of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016).", "unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface,", "Mars. 
Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars',", "abbrev='g0_mars', name='Mean surface gravity of Mars at mean planetary radius, ' 'ignoring rotation", "mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2 + (3 * 3 * mass_mars.value", "(24 * 60 * 60), unit='rad / s', uncertainty=0.000000003 * 2 * _np.pi", "' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg',", "from __future__ import print_function as _print_function import numpy as _np from astropy.constants import", "' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density of Mars', value=3", "2nd ed., Vol. 10, pp. 153-193). ' 'Oxford, Elsevier-Pergamon, doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant(", "'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars',", "'A new reference equipotential surface, and reference ellipsoid for ' 'the planet Mars.", "potential of the reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and", "astropy Constant that possesses the attributes name, value, unit, uncertainty, and reference. \"\"\"", "* 2 * _np.pi / 360 / (24 * 60 * 60), reference='<NAME>.,", "_Constant( abbrev='a_mars', name='Semimajor axis of the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>,", "uncertainty=_np.sqrt((3 * mass_mars.uncertainty / (_np.pi * 4 * r_mars.value**3))**2 + (3 * 3", "gravity field and orientation from Mars orbiter ' 'and lander tracking data, Icarus,", "/ s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). 
' 'An improved JPL Mars gravity", "+ (2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars and", "* mass_mars.value * r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived from", "/ s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential", "surface gravity of Mars at mean planetary radius, ' 'ignoring rotation and tides',", "gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.') omega_mars =", "' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of the Mars reference ellipsoid', value=(a_mars.value", "' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of the Mars reference ellipsoid',", "and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface gravity of Mars at mean", "new reference equipotential surface, and reference ellipsoid for ' 'the planet Mars. Earth,", "at mean planetary radius, ' 'ignoring rotation and tides', value=gm_mars.value / r_mars.value**2, unit='m", "surface, and reference ellipsoid for ' 'the planet Mars. Earth, Moon, and Planets,", "Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis", "2 * _np.pi / 360 / (24 * 60 * 60), reference='<NAME>., <NAME>,", "abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). 
Gravity", "name='Flattening of the Mars reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty", "an astropy Constant that possesses the attributes name, value, unit, uncertainty, and reference.", "= _Constant( abbrev='density_mars', name='Mean density of Mars', value=3 * mass_mars.value / (_np.pi *", "abbrev='density_mars', name='Mean density of Mars', value=3 * mass_mars.value / (_np.pi * 4 *", "gm_mars and G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius of Mars', value=3389.500e3, unit='m',", "value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and ' 'topography of the", "__future__ import division as _division from __future__ import print_function as _print_function import numpy", "s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface,", "/ (_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.') g0_mars", "ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A", "the terrestrial planets. In <NAME> & <NAME> ' '(Eds.), Treatise on Geophysics, 2nd", "value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL", "<NAME>. (2015). Gravity and ' 'topography of the terrestrial planets. In <NAME> &", "of Mars', value=0.4282837581575610e+14, unit='m3 / s2', uncertainty=0.18167460e+6, reference='<NAME>., <NAME>, <NAME> (2016). ' 'An", "name='Mean surface gravity of Mars at mean planetary radius, ' 'ignoring rotation and", "and reference ellipsoid for ' 'the planet Mars. Earth, Moon, and Planets, 106,", "numpy as _np from astropy.constants import Constant as _Constant from astropy.constants import G", "In <NAME> & <NAME> ' '(Eds.), Treatise on Geophysics, 2nd ed., Vol. 
10,", "- b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2 +", "of Mars', value=3 * mass_mars.value / (_np.pi * 4 * r_mars.value**3), unit='kg /", "_Constant( abbrev='g0_mars', name='Mean surface gravity of Mars at mean planetary radius, ' 'ignoring", "__future__ import print_function as _print_function import numpy as _np from astropy.constants import Constant", "1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of the Mars reference ellipsoid',", "/ _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2 ),", "= _Constant( abbrev='f_mars', name='Flattening of the Mars reference ellipsoid', value=(a_mars.value - b_mars.value) /", "mass_mars.value / (_np.pi * 4 * r_mars.value**3), unit='kg / m3', uncertainty=_np.sqrt((3 * mass_mars.uncertainty", "from gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars', name='Angular spin rate of Mars',", "radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and '", "Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty /", "Mars gravity field and orientation from Mars orbiter ' 'and lander tracking data,", "1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of the Mars reference", "ellipsoid', value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference", "reference='Derived from gm_mars and G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius of Mars',", "253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis of the Mars reference", "reference. 
\"\"\" from __future__ import absolute_import as _absolute_import from __future__ import division as", "reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface, and reference", "abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value", "pyshtools constants for the planet Mars. Each object is an astropy Constant that", "), reference='Derived from gm_mars and G.') r_mars = _Constant( abbrev='r_mars', name='Mean radius of", "uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and ' 'topography of the terrestrial planets.", "tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') a_mars = _Constant( abbrev='a_mars', name='Semimajor axis", "= _Constant( abbrev='g0_mars', name='Mean surface gravity of Mars at mean planetary radius, '", "gravity potential of the reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0, reference='<NAME>, <NAME>,", "is an astropy Constant that possesses the attributes name, value, unit, uncertainty, and", "mass_mars.value * r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2 ), reference='Derived from mass_mars", "_division from __future__ import print_function as _print_function import numpy as _np from astropy.constants", "_np.pi / 360 / (24 * 60 * 60), unit='rad / s', uncertainty=0.000000003", "\"\"\" pyshtools constants for the planet Mars. Each object is an astropy Constant", "b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and", "(3 * 3 * mass_mars.value * r_mars.uncertainty / (_np.pi * 4 * r_mars.value**4))**2", "Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars', name='Flattening of", "Mars. 
Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars',", "astropy.constants import G as _G gm_mars = _Constant( abbrev='gm_mars', name='Gravitational constant times the", "* 60 * 60), reference='<NAME>., <NAME>, <NAME> (2016). ' 'An improved JPL Mars", "& <NAME> ' '(Eds.), Treatise on Geophysics, 2nd ed., Vol. 10, pp. 153-193).", "<NAME> (2016). ' 'An improved JPL Mars gravity field and orientation from Mars", "/ r_mars.value**2)**2 + (2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from", "unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) / a_mars.value**2)**2 + (_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) /", "constants for the planet Mars. Each object is an astropy Constant that possesses", "from astropy.constants import Constant as _Constant from astropy.constants import G as _G gm_mars", "(_np.sqrt(a_mars.uncertainty**2 + b_mars.uncertainty**2) / a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A", "ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value) /", "r_mars.value**4))**2 ), reference='Derived from mass_mars and r_mars.') g0_mars = _Constant( abbrev='g0_mars', name='Mean surface", "r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.') omega_mars = _Constant( abbrev='omega_mars',", "name='Theoretical normal gravity potential of the reference ellipsoid', value=12654875.0, unit='m2 / s2', uncertainty=69.0,", "= _Constant( abbrev='a_mars', name='Semimajor axis of the Mars reference ellipsoid', value=3395428.0, unit='m', uncertainty=19.0,", "\"\"\" from __future__ import absolute_import as _absolute_import from __future__ import division as _division", "of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). 
Gravity and ' 'topography", "lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass", "r_mars.value**3))**2 + (3 * 3 * mass_mars.value * r_mars.uncertainty / (_np.pi * 4", "value=3395428.0, unit='m', uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential", "reference ellipsoid', value=(a_mars.value - b_mars.value) / a_mars.value, unit='', uncertainty=_np.sqrt((a_mars.uncertainty * (a_mars.value - b_mars.value)", "Mars. Earth, Moon, and Planets, 106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') f_mars = _Constant( abbrev='f_mars',", "Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052') mass_mars = _Constant( abbrev='mass_mars', name='Mass of Mars', value=gm_mars.value", "uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2 ), reference='Derived from gm_mars", "value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value * _G.uncertainty / _G.value**2)**2", "doi:10.1016/B978-0-444-53802-4.00169-X.') density_mars = _Constant( abbrev='density_mars', name='Mean density of Mars', value=3 * mass_mars.value /", "(2 * gm_mars.value * r_mars.uncertainty / r_mars.value**3)**2 ), reference='Derived from gm_mars and r_mars.')", "name='Mass of Mars', value=gm_mars.value / _G.value, unit='kg', uncertainty=_np.sqrt((gm_mars.uncertainty / _G.value)**2 + (gm_mars.value *", "abbrev='u0_mars', name='Theoretical normal gravity potential of the reference ellipsoid', value=12654875.0, unit='m2 / s2',", "uncertainty=19.0, reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential surface, and", "* 4 * r_mars.value**3))**2 + (3 * 3 * mass_mars.value * r_mars.uncertainty /", "<NAME>, and <NAME> (2010). 
' 'A new reference equipotential surface, and reference ellipsoid", "' 'An improved JPL Mars gravity field and orientation from Mars orbiter '", "name='Mean radius of Mars', value=3389.500e3, unit='m', uncertainty=0.0, reference='MarsTopo2600: Wieczorek, <NAME>. (2015). Gravity and", "106, 1-13, ' 'doi:10.1007/s11038-009-9342-7.') u0_mars = _Constant( abbrev='u0_mars', name='Theoretical normal gravity potential of", "/ a_mars.value)**2 ), reference='<NAME>, <NAME>, and <NAME> (2010). ' 'A new reference equipotential", "of Mars', value=350.891985307 * 2 * _np.pi / 360 / (24 * 60", "* _np.pi / 360 / (24 * 60 * 60), unit='rad / s',", "import print_function as _print_function import numpy as _np from astropy.constants import Constant as", "from Mars orbiter ' 'and lander tracking data, Icarus, 274, 253-260, ' 'doi:10.1016/j.icarus.2016.02.052')", "' 'doi:10.1007/s11038-009-9342-7.') b_mars = _Constant( abbrev='b_mars', name='Semiminor axis of the Mars reference ellipsoid'," ]
[ "elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in validations:", "}), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code == 422 assert response_dict['code'] ==", "found\", errors=list()): \"\"\" Convenience function for returning a JSON response that includes appropriate", "for a date of birth \"\"\" #Do whatever you need to do... return", "def do_something_useful(): #Confidently use the data in request.json... return jsonify(dict(status='OK')) if __name__ ==", "check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params =", "[] def check_required_fields(data, fields): for field, requirements in fields.iteritems(): nested_fields = type(requirements) ==", "\"__main__\": with app.test_client() as client: response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\":", "follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code == 422 assert response_dict['code'] == 422", "\"\"\" Decorator used to validate JSON input to an API request \"\"\" if", "client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict", "is None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for specific fields errors", "email address', lambda email: email is not None and EMAIL_REGEX.match(email)), ('email', \"This email", "first name.\", 'last_name':\"Please provide your last name.\", 'email':'Please specify a valid email address',", 
"response_dict['status'] == 'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\":", "from traceback import format_exception from flask import jsonify, request import sys from flask.exceptions", "bad_json_error_response() #Check for specific fields errors = [] def check_required_fields(data, fields): for field,", "Does the supplied date string meet our criteria for a date of birth", "@app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please provide your last", "value, tb = sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error", "include for public APIs etype, value, tb = sys.exc_info() error_info = ''.join(format_exception(etype, value,", "valid_date_of_birth) ] ) def do_something_useful(): #Confidently use the data in request.json... return jsonify(dict(status='OK'))", "from flask import Flask import re app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource", "jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client() as client: response = client.post( '/do/something',", "error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in", "errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception: #For internal use, nice", "func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON", "for public APIs etype, value, tb = sys.exc_info() error_info = ''.join(format_exception(etype, value, tb))", "supplied (or it didn't 
parse correctly) try: if request.json is None: return bad_json_error_response()", "data.get(field) in (None, ''): if nested_fields: error_msg = requirements.get('message') else: error_msg = requirements", "Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message'] == 'Please", "errors=errors, success=False)) response.status_code = code return response def bad_json_error_response(): \"\"\" Convenience function for", "email address.\", verify_account_available), ('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth) ]", "Error validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\")", "assert response_dict['status'] == 'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\",", "the supplied date string meet our criteria for a date of birth \"\"\"", "provide a valid email address', lambda email: email is not None and EMAIL_REGEX.match(email)),", "not found\", errors=list()): \"\"\" Convenience function for returning a JSON response that includes", "meet our criteria for a date of birth \"\"\" #Do whatever you need", "== \"JSON Validation Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name' assert", "need to do... return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first", "do_something_useful(): #Confidently use the data in request.json... 
return jsonify(dict(status='OK')) if __name__ == \"__main__\":", "assert response.status_code == 422 assert response_dict['code'] == 422 assert response_dict['message'] == \"JSON Validation", "= inspect.getargspec(validation_func).args func_params = [] for arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params):", "= type(requirements) == dict if data.get(field) in (None, ''): if nested_fields: error_msg =", "email address', 'date_of_birth':'Please provide your date of birth' }, validations=[ ('email', 'Please provide", "functools import json from traceback import format_exception from flask import jsonify, request import", "\"\"\" #Do whatever you need to do... return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={", "a problem parsing the supplied JSON data. Please send valid JSON.\") def json_required(func=None,", "name.\", 'email':'Please specify a valid email address', 'date_of_birth':'Please provide your date of birth'", "data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict", "problem parsing the supplied JSON data. 
Please send valid JSON.\") def json_required(func=None, required_fields={},", "\"\"\" Check to see if this email is already registered \"\"\" #Run a", "validations: func_args = inspect.getargspec(validation_func).args func_params = [] for arg in func_args: func_params.append(request.json.get(arg)) if", "decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to see if this email", "for debugging #Probably don't want to include for public APIs etype, value, tb", "'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception: #For", "was supplied (or it didn't parse correctly) try: if request.json is None: return", "message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX =", "check_required_fields(data, fields): for field, requirements in fields.iteritems(): nested_fields = type(requirements) == dict if", "\"\"\" return api_error_response(code=400, message=\"There was a problem parsing the supplied JSON data. Please", "email is not None and EMAIL_REGEX.match(email)), ('email', \"This email is already in use.", "if this email is already registered \"\"\" #Run a query, use an ORM,", "is already registered \"\"\" #Run a query, use an ORM, use Twilio to", "message=\"Requested resource was not found\", errors=list()): \"\"\" Convenience function for returning a JSON", "valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string meet our criteria for a date", "function for returning an error message related to malformed/missing JSON data. 
\"\"\" return", "(None, ''): if nested_fields: error_msg = requirements.get('message') else: error_msg = requirements errors.append({'field': field,", "birth \"\"\" #Do whatever you need to do... return True @app.route(\"/do/something\", methods=['POST']) @json_required(", "in the API response for debugging #Probably don't want to include for public", "= ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return", "registered \"\"\" #Run a query, use an ORM, use Twilio to call someone", "valid email address', lambda email: email is not None and EMAIL_REGEX.match(email)), ('email', \"This", "\"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code return response def", "address', lambda email: email is not None and EMAIL_REGEX.match(email)), ('email', \"This email is", "an error message related to malformed/missing JSON data. \"\"\" return api_error_response(code=400, message=\"There was", "response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code return response def bad_json_error_response():", "try a different email address.\", verify_account_available), ('date_of_birth', 'Please provide a valid date of", "the traceback in the API response for debugging #Probably don't want to include", "message=\"There was a problem parsing the supplied JSON data. 
Please send valid JSON.\")", "\"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code", "re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to see if this email is already registered", "date of birth' }, validations=[ ('email', 'Please provide a valid email address', lambda", "if data.get(field) in (None, ''): if nested_fields: error_msg = requirements.get('message') else: error_msg =", "in (None, ''): if nested_fields: error_msg = requirements.get('message') else: error_msg = requirements errors.append({'field':", "flask import Flask import re app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource was", "have the traceback in the API response for debugging #Probably don't want to", "input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\"", "#If no JSON was supplied (or it didn't parse correctly) try: if request.json", "re app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not found\", errors=list()): \"\"\"", ") def do_something_useful(): #Confidently use the data in request.json... 
return jsonify(dict(status='OK')) if __name__", "bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for specific fields errors = [] def", "requirements.get('message') else: error_msg = requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields',", "Failed\", errors=errors) except Exception: #For internal use, nice to have the traceback in", "return response def bad_json_error_response(): \"\"\" Convenience function for returning an error message related", "\"This email is already in use. Please try a different email address.\", verify_account_available),", "request import sys from flask.exceptions import JSONBadRequest from flask import Flask import re", "def decorated_function(*args, **kwargs): try: #If no JSON was supplied (or it didn't parse", "not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation Failed\",", "json.loads(response.data) assert response_dict['status'] == 'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\":", "response that includes appropriate error messages and code. 
\"\"\" response = jsonify(dict(code=code, message=message,", "use an ORM, use Twilio to call someone and ask them :-) return", "return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to see if this", "valid email address', 'date_of_birth':'Please provide your date of birth' }, validations=[ ('email', 'Please", "= code return response def bad_json_error_response(): \"\"\" Convenience function for returning an error", "nested_fields = type(requirements) == dict if data.get(field) in (None, ''): if nested_fields: error_msg", "return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no JSON was", "except JSONBadRequest: return bad_json_error_response() #Check for specific fields errors = [] def check_required_fields(data,", "criteria for a date of birth \"\"\" #Do whatever you need to do...", "True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string meet our criteria for", "traceback import format_exception from flask import jsonify, request import sys from flask.exceptions import", "is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no", "to see if this email is already registered \"\"\" #Run a query, use", "different email address.\", verify_account_available), ('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth)", "ORM, use Twilio to call someone and ask them :-) return True def", "#Confidently use the data in request.json... 
return jsonify(dict(status='OK')) if __name__ == \"__main__\": with", "assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message'] == 'Please provide", "errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field,", "'Please provide a valid date of birth', valid_date_of_birth) ] ) def do_something_useful(): #Confidently", "== \"__main__\": with app.test_client() as client: response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\",", "func_args = inspect.getargspec(validation_func).args func_params = [] for arg in func_args: func_params.append(request.json.get(arg)) if not", "Please try a different email address.\", verify_account_available), ('date_of_birth', 'Please provide a valid date", "\"JSON Validation Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message']", "API request \"\"\" if func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def", "api_error_response(code=400, message=\"There was a problem parsing the supplied JSON data. 
Please send valid", "Validation Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message'] ==", "a query, use an ORM, use Twilio to call someone and ask them", "provide your first name.\", 'last_name':\"Please provide your last name.\", 'email':'Please specify a valid", "required_fields={}, validations=[]): \"\"\" Decorator used to validate JSON input to an API request", "\"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] == 'OK' response", "validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def", "provide your last name.\", 'email':'Please specify a valid email address', 'date_of_birth':'Please provide your", "sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error validating API input\",", "requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args", "try: #If no JSON was supplied (or it didn't parse correctly) try: if", "#Run a query, use an ORM, use Twilio to call someone and ask", "decorator for API input validation/error handling \"\"\" import inspect import functools import json", "validations=[]): \"\"\" Decorator used to validate JSON input to an API request \"\"\"", "our criteria for a date of birth \"\"\" #Do whatever you need to", "email is already in use. 
Please try a different email address.\", verify_account_available), ('date_of_birth',", "date string meet our criteria for a date of birth \"\"\" #Do whatever", "for validation_field, validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params = [] for", "specify a valid email address', 'date_of_birth':'Please provide your date of birth' }, validations=[", "decorated_function(*args, **kwargs): try: #If no JSON was supplied (or it didn't parse correctly)", "== 'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\"", "sys from flask.exceptions import JSONBadRequest from flask import Flask import re app =", "JSONBadRequest: return bad_json_error_response() #Check for specific fields errors = [] def check_required_fields(data, fields):", "messages and code. \"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code", "if errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception: #For internal use,", "\"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code == 422 assert response_dict['code']", "message=\"JSON Validation Failed\", errors=errors) except Exception: #For internal use, nice to have the", "json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used to validate JSON input to an API", "= requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields)", "want to include for public APIs etype, value, tb = sys.exc_info() error_info =", "1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message'] == 'Please 
provide your first name.'", "Twilio to call someone and ask them :-) return True def valid_date_of_birth(date_of_birth): \"\"\"", "required_fields) for validation_field, validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params = []", "validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors)", "birth' }, validations=[ ('email', 'Please provide a valid email address', lambda email: email", "response.status_code == 422 assert response_dict['code'] == 422 assert response_dict['message'] == \"JSON Validation Failed\"", "of birth', valid_date_of_birth) ] ) def do_something_useful(): #Confidently use the data in request.json...", "Demo of json_required decorator for API input validation/error handling \"\"\" import inspect import", "None and EMAIL_REGEX.match(email)), ('email', \"This email is already in use. Please try a", "returning an error message related to malformed/missing JSON data. \"\"\" return api_error_response(code=400, message=\"There", "response_dict['code'] == 422 assert response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors']) == 1", "client: response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\":", "nested_fields: error_msg = requirements.get('message') else: error_msg = requirements errors.append({'field': field, 'message': error_msg}) elif", "not None and EMAIL_REGEX.match(email)), ('email', \"This email is already in use. 
Please try", "a different email address.\", verify_account_available), ('date_of_birth', 'Please provide a valid date of birth',", "validation/error handling \"\"\" import inspect import functools import json from traceback import format_exception", "''): if nested_fields: error_msg = requirements.get('message') else: error_msg = requirements errors.append({'field': field, 'message':", "verify_account_available(email): \"\"\" Check to see if this email is already registered \"\"\" #Run", "errors.append({'field': validation_field, 'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except", "JSON data. Please send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used", "error messages and code. \"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code =", "\"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status']", "('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth) ] ) def do_something_useful():", "assert response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] ==", "JSON was supplied (or it didn't parse correctly) try: if request.json is None:", "didn't parse correctly) try: if request.json is None: return bad_json_error_response() except JSONBadRequest: return", "\"\"\" Convenience function for returning a JSON response that includes appropriate error messages", "field, requirements in fields.iteritems(): nested_fields = type(requirements) == dict if data.get(field) in (None,", "validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params = [] for arg in func_args:", "def json_required(func=None, 
required_fields={}, validations=[]): \"\"\" Decorator used to validate JSON input to an", "= sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error validating API", "to include for public APIs etype, value, tb = sys.exc_info() error_info = ''.join(format_exception(etype,", "app.test_client() as client: response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\":", "\"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] ==", "json_required decorator for API input validation/error handling \"\"\" import inspect import functools import", "'email':'Please specify a valid email address', 'date_of_birth':'Please provide your date of birth' },", "as client: response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\",", ":-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string meet our", "= requirements.get('message') else: error_msg = requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field],", "data. \"\"\" return api_error_response(code=400, message=\"There was a problem parsing the supplied JSON data.", "'Please provide a valid email address', lambda email: email is not None and", "this email is already registered \"\"\" #Run a query, use an ORM, use", "return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception: #For internal use, nice to", "for arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if", "code. 
\"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code return response", "Check to see if this email is already registered \"\"\" #Run a query,", "request.json is None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for specific fields", "someone and ask them :-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied", "requirements in fields.iteritems(): nested_fields = type(requirements) == dict if data.get(field) in (None, ''):", "\"\"\" Convenience function for returning an error message related to malformed/missing JSON data.", "in use. Please try a different email address.\", verify_account_available), ('date_of_birth', 'Please provide a", "the API response for debugging #Probably don't want to include for public APIs", "tb)) return api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return", "a valid date of birth', valid_date_of_birth) ] ) def do_something_useful(): #Confidently use the", "if func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try:", "try: if request.json is None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for", "for API input validation/error handling \"\"\" import inspect import functools import json from", "use, nice to have the traceback in the API response for debugging #Probably", "of birth \"\"\" #Do whatever you need to do... 
return True @app.route(\"/do/something\", methods=['POST'])", "valid date of birth', valid_date_of_birth) ] ) def do_something_useful(): #Confidently use the data", "\"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] == 'OK' response =", "JSON input to an API request \"\"\" if func is None: return functools.partial(json_required,", "''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return func(*args,", "response_dict = json.loads(response.data) assert response.status_code == 422 assert response_dict['code'] == 422 assert response_dict['message']", "you need to do... return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your", "('email', \"This email is already in use. Please try a different email address.\",", "check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in validations: func_args =", "errors=list()): \"\"\" Convenience function for returning a JSON response that includes appropriate error", "for returning an error message related to malformed/missing JSON data. \"\"\" return api_error_response(code=400,", "it didn't parse correctly) try: if request.json is None: return bad_json_error_response() except JSONBadRequest:", "error message related to malformed/missing JSON data. 
\"\"\" return api_error_response(code=400, message=\"There was a", "traceback in the API response for debugging #Probably don't want to include for", "tb = sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error validating", "success=False)) response.status_code = code return response def bad_json_error_response(): \"\"\" Convenience function for returning", "to do... return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first name.\",", "with app.test_client() as client: response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\",", "return func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to", "the data in request.json... return jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client() as", "app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not found\", errors=list()): \"\"\" Convenience", "api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception: #For internal use, nice to have", "jsonify, request import sys from flask.exceptions import JSONBadRequest from flask import Flask import", "birth', valid_date_of_birth) ] ) def do_something_useful(): #Confidently use the data in request.json... 
return", "\"\"\" if func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs):", "\"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data)", "type(requirements) == dict if data.get(field) in (None, ''): if nested_fields: error_msg = requirements.get('message')", "validations=[ ('email', 'Please provide a valid email address', lambda email: email is not", "malformed/missing JSON data. \"\"\" return api_error_response(code=400, message=\"There was a problem parsing the supplied", "= [] for arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message':", "do... return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please", "use. Please try a different email address.\", verify_account_available), ('date_of_birth', 'Please provide a valid", "**kwargs): try: #If no JSON was supplied (or it didn't parse correctly) try:", "else: error_msg = requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {}))", "already in use. 
Please try a different email address.\", verify_account_available), ('date_of_birth', 'Please provide", "import JSONBadRequest from flask import Flask import re app = Flask(__name__) def api_error_response(code=404,", "= json.loads(response.data) assert response.status_code == 422 assert response_dict['code'] == 422 assert response_dict['message'] ==", "for specific fields errors = [] def check_required_fields(data, fields): for field, requirements in", "from flask.exceptions import JSONBadRequest from flask import Flask import re app = Flask(__name__)", "#Do whatever you need to do... return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please", "json from traceback import format_exception from flask import jsonify, request import sys from", "data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data)", "in fields.iteritems(): nested_fields = type(requirements) == dict if data.get(field) in (None, ''): if", "= client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }),", "to have the traceback in the API response for debugging #Probably don't want", "validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params = [] for arg in", "response = client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\"", "includes appropriate error messages and code. 
\"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False))", "of json_required decorator for API input validation/error handling \"\"\" import inspect import functools", "for returning a JSON response that includes appropriate error messages and code. \"\"\"", "inspect.getargspec(validation_func).args func_params = [] for arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field':", "supplied date string meet our criteria for a date of birth \"\"\" #Do", "functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no JSON was supplied", "\"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict =", "APIs etype, value, tb = sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500,", "validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no JSON was supplied (or it", "to call someone and ask them :-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does", "input validation/error handling \"\"\" import inspect import functools import json from traceback import", "\"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code == 422", "etype, value, tb = sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal", "('email', 'Please provide a valid email address', lambda email: email is not None", "response def bad_json_error_response(): \"\"\" Convenience function for returning an error message related to", "api_error_response(code=404, message=\"Requested resource was 
not found\", errors=list()): \"\"\" Convenience function for returning a", "already registered \"\"\" #Run a query, use an ORM, use Twilio to call", "returning a JSON response that includes appropriate error messages and code. \"\"\" response", "import re app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not found\", errors=list()):", "api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX", "specific fields errors = [] def check_required_fields(data, fields): for field, requirements in fields.iteritems():", "JSON data. \"\"\" return api_error_response(code=400, message=\"There was a problem parsing the supplied JSON", "(or it didn't parse correctly) try: if request.json is None: return bad_json_error_response() except", "\"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code == 422 assert", "def api_error_response(code=404, message=\"Requested resource was not found\", errors=list()): \"\"\" Convenience function for returning", "}, validations=[ ('email', 'Please provide a valid email address', lambda email: email is", "client.post( '/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True,", "request \"\"\" if func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args,", "[] for arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message})", "required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please provide your last name.\", 'email':'Please 
specify", "supplied JSON data. Please send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator", "\"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] == 'OK'", "required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no JSON was supplied (or", "used to validate JSON input to an API request \"\"\" if func is", "assert response_dict['code'] == 422 assert response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors']) ==", "import json from traceback import format_exception from flask import jsonify, request import sys", "and EMAIL_REGEX.match(email)), ('email', \"This email is already in use. Please try a different", "Exception: #For internal use, nice to have the traceback in the API response", "your last name.\", 'email':'Please specify a valid email address', 'date_of_birth':'Please provide your date", "was a problem parsing the supplied JSON data. 
Please send valid JSON.\") def", "import sys from flask.exceptions import JSONBadRequest from flask import Flask import re app", "import inspect import functools import json from traceback import format_exception from flask import", "fields errors = [] def check_required_fields(data, fields): for field, requirements in fields.iteritems(): nested_fields", "len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message'] == 'Please provide your", "email: email is not None and EMAIL_REGEX.match(email)), ('email', \"This email is already in", "Validation Failed\", errors=errors) except Exception: #For internal use, nice to have the traceback", "API response for debugging #Probably don't want to include for public APIs etype,", "EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to see if this email is", "a valid email address', lambda email: email is not None and EMAIL_REGEX.match(email)), ('email',", "#Check for specific fields errors = [] def check_required_fields(data, fields): for field, requirements", "to validate JSON input to an API request \"\"\" if func is None:", "a JSON response that includes appropriate error messages and code. \"\"\" response =", "response.status_code = code return response def bad_json_error_response(): \"\"\" Convenience function for returning an", "422 assert response_dict['code'] == 422 assert response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors'])", "data. 
Please send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used to", "\"\"\" import inspect import functools import json from traceback import format_exception from flask", "string meet our criteria for a date of birth \"\"\" #Do whatever you", "is already in use. Please try a different email address.\", verify_account_available), ('date_of_birth', 'Please", "input to an API request \"\"\" if func is None: return functools.partial(json_required, required_fields=required_fields,", "\"\"\" Demo of json_required decorator for API input validation/error handling \"\"\" import inspect", "return api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function", "\"\"\" Does the supplied date string meet our criteria for a date of", "date of birth', valid_date_of_birth) ] ) def do_something_useful(): #Confidently use the data in", "internal use, nice to have the traceback in the API response for debugging", "jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code return response def bad_json_error_response(): \"\"\" Convenience", "dict if data.get(field) in (None, ''): if nested_fields: error_msg = requirements.get('message') else: error_msg", "'last_name':\"Please provide your last name.\", 'email':'Please specify a valid email address', 'date_of_birth':'Please provide", "validation_field, validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params = [] for arg", "related to malformed/missing JSON data. 
\"\"\" return api_error_response(code=400, message=\"There was a problem parsing", "None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no JSON", "'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func", "\"\"\" #Run a query, use an ORM, use Twilio to call someone and", "your date of birth' }, validations=[ ('email', 'Please provide a valid email address',", "send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used to validate JSON", "address.\", verify_account_available), ('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth) ] )", "'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }),", "name.\", 'last_name':\"Please provide your last name.\", 'email':'Please specify a valid email address', 'date_of_birth':'Please", "errors = [] def check_required_fields(data, fields): for field, requirements in fields.iteritems(): nested_fields =", "= Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not found\", errors=list()): \"\"\" Convenience function", "for field, requirements in fields.iteritems(): nested_fields = type(requirements) == dict if data.get(field) in", "__name__ == \"__main__\": with app.test_client() as client: response = client.post( '/do/something', data=json.dumps({ \"first_name\":", "if __name__ == \"__main__\": with app.test_client() as client: response = client.post( '/do/something', data=json.dumps({", "response_dict = json.loads(response.data) assert response_dict['status'] == 'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\":", "the 
supplied JSON data. Please send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\"", "response for debugging #Probably don't want to include for public APIs etype, value,", "a valid email address', 'date_of_birth':'Please provide your date of birth' }, validations=[ ('email',", "if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation", "see if this email is already registered \"\"\" #Run a query, use an", "don't want to include for public APIs etype, value, tb = sys.exc_info() error_info", "func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors: return api_error_response(code=422,", "validate JSON input to an API request \"\"\" if func is None: return", "inspect import functools import json from traceback import format_exception from flask import jsonify,", "API input\", errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email):", "flask import jsonify, request import sys from flask.exceptions import JSONBadRequest from flask import", "to malformed/missing JSON data. \"\"\" return api_error_response(code=400, message=\"There was a problem parsing the", "return bad_json_error_response() #Check for specific fields errors = [] def check_required_fields(data, fields): for", "parse correctly) try: if request.json is None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response()", "data in request.json... 
return jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client() as client:", "Flask import re app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not found\",", "no JSON was supplied (or it didn't parse correctly) try: if request.json is", "an ORM, use Twilio to call someone and ask them :-) return True", "return True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string meet our criteria", "validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception: #For internal", "'date_of_birth':'Please provide your date of birth' }, validations=[ ('email', 'Please provide a valid", "JSONBadRequest from flask import Flask import re app = Flask(__name__) def api_error_response(code=404, message=\"Requested", "def verify_account_available(email): \"\"\" Check to see if this email is already registered \"\"\"", "email is already registered \"\"\" #Run a query, use an ORM, use Twilio", "return api_error_response(code=400, message=\"There was a problem parsing the supplied JSON data. Please send", "and ask them :-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date", "a date of birth \"\"\" #Do whatever you need to do... return True", "def check_required_fields(data, fields): for field, requirements in fields.iteritems(): nested_fields = type(requirements) == dict", "resource was not found\", errors=list()): \"\"\" Convenience function for returning a JSON response", "request.json... 
return jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client() as client: response =", "== 1 assert response_dict['errors'][0]['field'] == 'first_name' assert response_dict['errors'][0]['message'] == 'Please provide your first", "import Flask import re app = Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not", "if request.json is None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for specific", "validation_field, 'message': validation_message}) if errors: return api_error_response(code=422, message=\"JSON Validation Failed\", errors=errors) except Exception:", "'first_name':\"Please provide your first name.\", 'last_name':\"Please provide your last name.\", 'email':'Please specify a", "func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to see", "in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors: return", "last name.\", 'email':'Please specify a valid email address', 'date_of_birth':'Please provide your date of", "'/do/something', data=json.dumps({ \"first_name\": \"Brian\", \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json')", "correctly) try: if request.json is None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check", "of birth' }, validations=[ ('email', 'Please provide a valid email address', lambda email:", "field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, 
required_fields) for validation_field, validation_message,", "error_info = ''.join(format_exception(etype, value, tb)) return api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}])", "response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field'] == 'first_name'", "valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used to validate JSON input", "error_msg = requirements.get('message') else: error_msg = requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields:", "if nested_fields: error_msg = requirements.get('message') else: error_msg = requirements errors.append({'field': field, 'message': error_msg})", "message=message, errors=errors, success=False)) response.status_code = code return response def bad_json_error_response(): \"\"\" Convenience function", "public APIs etype, value, tb = sys.exc_info() error_info = ''.join(format_exception(etype, value, tb)) return", "errors=[{'message':error_info}]) return func(*args, **kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check", "message related to malformed/missing JSON data. 
\"\"\" return api_error_response(code=400, message=\"There was a problem", "query, use an ORM, use Twilio to call someone and ask them :-)", "requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for", "Flask(__name__) def api_error_response(code=404, message=\"Requested resource was not found\", errors=list()): \"\"\" Convenience function for", "value, tb)) return api_error_response(code=500, message=\"Internal Error validating API input\", errors=[{'message':error_info}]) return func(*args, **kwargs)", "{})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in validations: func_args = inspect.getargspec(validation_func).args func_params", "from flask import jsonify, request import sys from flask.exceptions import JSONBadRequest from flask", "import jsonify, request import sys from flask.exceptions import JSONBadRequest from flask import Flask", "nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json, required_fields) for validation_field, validation_message, validation_func in validations: func_args", "= client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json')", "== 422 assert response_dict['code'] == 422 assert response_dict['message'] == \"JSON Validation Failed\" assert", "call someone and ask them :-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does the", "handling \"\"\" import inspect import functools import json from traceback import format_exception from", "them :-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string meet", "provide a valid date of birth', valid_date_of_birth) ] ) def 
do_something_useful(): #Confidently use", "= json.loads(response.data) assert response_dict['status'] == 'OK' response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\",", "fields): for field, requirements in fields.iteritems(): nested_fields = type(requirements) == dict if data.get(field)", "content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] == 'OK' response = client.post( '/do/something', data=json.dumps({", "import format_exception from flask import jsonify, request import sys from flask.exceptions import JSONBadRequest", "provide your date of birth' }, validations=[ ('email', 'Please provide a valid email", "format_exception from flask import jsonify, request import sys from flask.exceptions import JSONBadRequest from", "errors=errors) except Exception: #For internal use, nice to have the traceback in the", "was not found\", errors=list()): \"\"\" Convenience function for returning a JSON response that", "that includes appropriate error messages and code. \"\"\" response = jsonify(dict(code=code, message=message, errors=errors,", "= re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def verify_account_available(email): \"\"\" Check to see if this email is already", "return jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client() as client: response = client.post(", "function for returning a JSON response that includes appropriate error messages and code.", "in request.json... 
return jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client() as client: response", "Convenience function for returning a JSON response that includes appropriate error messages and", "Please send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used to validate", "None: return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for specific fields errors =", "func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func) def decorated_function(*args, **kwargs): try: #If", "= [] def check_required_fields(data, fields): for field, requirements in fields.iteritems(): nested_fields = type(requirements)", "nice to have the traceback in the API response for debugging #Probably don't", "EMAIL_REGEX.match(email)), ('email', \"This email is already in use. Please try a different email", "bad_json_error_response(): \"\"\" Convenience function for returning an error message related to malformed/missing JSON", "appropriate error messages and code. 
\"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code", "= jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code return response def bad_json_error_response(): \"\"\"", "flask.exceptions import JSONBadRequest from flask import Flask import re app = Flask(__name__) def", "return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please provide", "use Twilio to call someone and ask them :-) return True def valid_date_of_birth(date_of_birth):", "#Probably don't want to include for public APIs etype, value, tb = sys.exc_info()", "fields.iteritems(): nested_fields = type(requirements) == dict if data.get(field) in (None, ''): if nested_fields:", "your first name.\", 'last_name':\"Please provide your last name.\", 'email':'Please specify a valid email", "to an API request \"\"\" if func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations)", "except Exception: #For internal use, nice to have the traceback in the API", "in validations: func_args = inspect.getargspec(validation_func).args func_params = [] for arg in func_args: func_params.append(request.json.get(arg))", "json.loads(response.data) assert response.status_code == 422 assert response_dict['code'] == 422 assert response_dict['message'] == \"JSON", "arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field, 'message': validation_message}) if errors:", "def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string meet our criteria for a", "**kwargs) return decorated_function EMAIL_REGEX = re.compile(r\"[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\") def 
verify_account_available(email): \"\"\" Check to see if", "error_msg = requirements errors.append({'field': field, 'message': error_msg}) elif nested_fields: check_required_fields(data[field], requirements.get('fields', {})) check_required_fields(request.json,", "methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please provide your last name.\",", "is not None and EMAIL_REGEX.match(email)), ('email', \"This email is already in use. Please", "debugging #Probably don't want to include for public APIs etype, value, tb =", "content_type='application/json') response_dict = json.loads(response.data) assert response.status_code == 422 assert response_dict['code'] == 422 assert", "== dict if data.get(field) in (None, ''): if nested_fields: error_msg = requirements.get('message') else:", "func_params = [] for arg in func_args: func_params.append(request.json.get(arg)) if not validation_func(*func_params): errors.append({'field': validation_field,", "return bad_json_error_response() except JSONBadRequest: return bad_json_error_response() #Check for specific fields errors = []", "Convenience function for returning an error message related to malformed/missing JSON data. \"\"\"", "whatever you need to do... 
return True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide", "lambda email: email is not None and EMAIL_REGEX.match(email)), ('email', \"This email is already", "import functools import json from traceback import format_exception from flask import jsonify, request", "verify_account_available), ('date_of_birth', 'Please provide a valid date of birth', valid_date_of_birth) ] ) def", "address', 'date_of_birth':'Please provide your date of birth' }, validations=[ ('email', 'Please provide a", "}), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] == 'OK' response = client.post(", "code return response def bad_json_error_response(): \"\"\" Convenience function for returning an error message", "an API request \"\"\" if func is None: return functools.partial(json_required, required_fields=required_fields, validations=validations) @functools.wraps(func)", "@functools.wraps(func) def decorated_function(*args, **kwargs): try: #If no JSON was supplied (or it didn't", "def bad_json_error_response(): \"\"\" Convenience function for returning an error message related to malformed/missing", "ask them :-) return True def valid_date_of_birth(date_of_birth): \"\"\" Does the supplied date string", "422 assert response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors']) == 1 assert response_dict['errors'][0]['field']", "date of birth \"\"\" #Do whatever you need to do... return True @app.route(\"/do/something\",", "== 422 assert response_dict['message'] == \"JSON Validation Failed\" assert len(response_dict['errors']) == 1 assert", "use the data in request.json... 
return jsonify(dict(status='OK')) if __name__ == \"__main__\": with app.test_client()", "response = client.post( '/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True,", "@json_required( required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please provide your last name.\", 'email':'Please", "] ) def do_something_useful(): #Confidently use the data in request.json... return jsonify(dict(status='OK')) if", "Decorator used to validate JSON input to an API request \"\"\" if func", "parsing the supplied JSON data. Please send valid JSON.\") def json_required(func=None, required_fields={}, validations=[]):", "API input validation/error handling \"\"\" import inspect import functools import json from traceback", "True @app.route(\"/do/something\", methods=['POST']) @json_required( required_fields={ 'first_name':\"Please provide your first name.\", 'last_name':\"Please provide your", "#For internal use, nice to have the traceback in the API response for", "follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response_dict['status'] == 'OK' response = client.post( '/do/something',", "'/do/something', data=json.dumps({ \"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict =", "JSON.\") def json_required(func=None, required_fields={}, validations=[]): \"\"\" Decorator used to validate JSON input to", "\"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert response.status_code ==", "\"last_name\": \"Corbin\", \"email\": \"<EMAIL>\", \"date_of_birth\": \"01/01/1970\" }), follow_redirects=True, content_type='application/json') response_dict = json.loads(response.data) assert", "and code. 
\"\"\" response = jsonify(dict(code=code, message=message, errors=errors, success=False)) response.status_code = code return", "JSON response that includes appropriate error messages and code. \"\"\" response = jsonify(dict(code=code," ]
[ "y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)), dim=0) return M", "representing the camera extrinsic properties Attributes ---------- position : Tensor the camera position", "(3,) position tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.position = position", "from ..math.normvec import * class CameraExtrinsic(object): \"\"\" A class representing the camera extrinsic", "device to store the tensors to (default is 'cuda:0') \"\"\" self.position = torch.tensor(position,", "p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0,", "torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)), dim=0) return", "0)) device : str or torch.device (optional) the device to store the tensors", "self.position = torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector,", "self.position def view_matrix(self): \"\"\" Returns the current view matrix Returns ------- Tensor a", "camera target (default is (0, 0, 1)) up_vector : list or tuple (optional)", "camera position target : Tensor the camera target up_vector : Tensor the camera", "torch from ..math.cross import * from ..math.normvec import * class CameraExtrinsic(object): \"\"\" A", "tuple (optional) the camera target (default is (0, 0, 1)) up_vector : list", "device(self): return self._device @device.setter def device(self, value): self._device = value self.position = self.position.to(self.device)", "..math.cross import * from ..math.normvec import * class CameraExtrinsic(object): \"\"\" A class representing", "direction tensor \"\"\" return self.target - self.position def view_matrix(self): \"\"\" Returns the current", "\"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), 
z)) y = cross(z, x) p", "return self._device @device.setter def device(self, value): self._device = value self.position = self.position.to(self.device) self.target", "(4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y =", "or tuple (optional) the camera target (default is (0, 0, 1)) up_vector :", "0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ---------- position : list or", "in kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector =", "current view matrix to(**kwargs) changes extrinsic dtype and/or device \"\"\" def __init__(self, position=(0,", "------- CameraExtrinsic the extrinsic itself \"\"\" self.target = target return self def look_from(self,", "the extrinsic dtype and/or device Parameters ---------- kwargs : ... Returns ------- CameraExtrinsic", "1, 0), device='cuda:0'): \"\"\" Parameters ---------- position : list or tuple (optional) the", "or tuple (optional) the camera up vector (default is (0, 1, 0)) device", "Parameters ---------- position : Tensor the (3,) position tensor Returns ------- CameraExtrinsic the", "\"\"\" return self.target - self.position def view_matrix(self): \"\"\" Returns the current view matrix", "the camera position target : Tensor the camera target up_vector : Tensor the", "device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device =", "vector device : str or torch.device the device to store the tensors to", "0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ---------- position :", "Tensor a (4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z))", "camera direction view_matrix() returns the current view matrix to(**kwargs) changes extrinsic dtype and/or", "dtype 
and/or device \"\"\" def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0,", "or tuple (optional) the camera position (default is (0, 0, 0)) target :", "camera target look_from(position) sets the camera position direction() returns the camera direction view_matrix()", "1, 0)) device : str or torch.device (optional) the device to store the", "(optional) the camera position (default is (0, 0, 0)) target : list or", "self @property def device(self): return self._device @device.setter def device(self, value): self._device = value", ": Tensor the (3,) target tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\"", "CameraExtrinsic the extrinsic itself \"\"\" self.position = position return self def direction(self): \"\"\"", "the extrinsic itself \"\"\" self.position = position return self def direction(self): \"\"\" Returns", "(0, 0, 1)) up_vector : list or tuple (optional) the camera up vector", "to store the tensors to (default is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float,", ": str or torch.device the device to store the tensors to Methods -------", "= self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property def device(self): return self._device @device.setter", "kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs)", "------- Tensor a (4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0),", ": Tensor the camera target up_vector : Tensor the camera up vector device", "\"\"\" def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'):", "0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ---------- position", "self._device @device.setter def device(self, value): self._device = value self.position = self.position.to(self.device) self.target 
=", "= normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(),", "Tensor the camera position target : Tensor the camera target up_vector : Tensor", "position return self def direction(self): \"\"\" Returns the camera direction Returns ------- Tensor", "0, 1]], dtype=torch.float, device=self.device)), dim=0) return M def to(self, **kwargs): \"\"\" Changes the", "kwargs : ... Returns ------- CameraExtrinsic the extrinsic itself \"\"\" if 'device' in", "view matrix to(**kwargs) changes extrinsic dtype and/or device \"\"\" def __init__(self, position=(0, 0,", "def direction(self): \"\"\" Returns the camera direction Returns ------- Tensor the (3,) direction", "device : str or torch.device the device to store the tensors to Methods", "the camera direction Returns ------- Tensor the (3,) direction tensor \"\"\" return self.target", "extrinsic itself \"\"\" self.position = position return self def direction(self): \"\"\" Returns the", "the tensors to Methods ------- look_at(target) sets the camera target look_from(position) sets the", "extrinsic properties Attributes ---------- position : Tensor the camera position target : Tensor", "\"\"\" Sets the camera position Parameters ---------- position : Tensor the (3,) position", "cross(z, x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0,", ": ... 
Returns ------- CameraExtrinsic the extrinsic itself \"\"\" if 'device' in kwargs:", "if 'device' in kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs)", "self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return", "Returns ------- CameraExtrinsic the extrinsic itself \"\"\" if 'device' in kwargs: self._device =", "Tensor the camera up vector device : str or torch.device the device to", "up vector device : str or torch.device the device to store the tensors", "position Parameters ---------- position : Tensor the (3,) position tensor Returns ------- CameraExtrinsic", "view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z,", "camera position Parameters ---------- position : Tensor the (3,) position tensor Returns -------", "the device to store the tensors to (default is 'cuda:0') \"\"\" self.position =", "direction(self): \"\"\" Returns the camera direction Returns ------- Tensor the (3,) direction tensor", "look_at(target) sets the camera target look_from(position) sets the camera position direction() returns the", "device : str or torch.device (optional) the device to store the tensors to", "tensors to (default is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target =", "look_at(self, target): \"\"\" Sets the camera target Parameters ---------- target : Tensor the", "is (0, 0, 1)) up_vector : list or tuple (optional) the camera up", "------- CameraExtrinsic the extrinsic itself \"\"\" if 'device' in kwargs: self._device = kwargs['device']", "(0, 1, 0)) device : str or torch.device (optional) the device to store", "tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.position = position return self", "the (3,) position tensor Returns ------- CameraExtrinsic 
the extrinsic itself \"\"\" self.position =", "changes extrinsic dtype and/or device \"\"\" def __init__(self, position=(0, 0, 0), target=(0, 0,", "camera up vector (default is (0, 1, 0)) device : str or torch.device", "Methods ------- look_at(target) sets the camera target look_from(position) sets the camera position direction()", "'device' in kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector", "store the tensors to (default is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device)", "def view_matrix(self): \"\"\" Returns the current view matrix Returns ------- Tensor a (4,4,)", "self.position = position return self def direction(self): \"\"\" Returns the camera direction Returns", "Parameters ---------- target : Tensor the (3,) target tensor Returns ------- CameraExtrinsic the", "---------- position : Tensor the (3,) position tensor Returns ------- CameraExtrinsic the extrinsic", "camera extrinsic properties Attributes ---------- position : Tensor the camera position target :", "itself \"\"\" if 'device' in kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target", "0, 1)) up_vector : list or tuple (optional) the camera up vector (default", "device(self, value): self._device = value self.position = self.position.to(self.device) self.target = self.target.to(self.device) self.up_vector =", "and/or device Parameters ---------- kwargs : ... 
Returns ------- CameraExtrinsic the extrinsic itself", "matrix Returns ------- Tensor a (4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x", "(default is (0, 0, 0)) target : list or tuple (optional) the camera", "(optional) the device to store the tensors to (default is 'cuda:0') \"\"\" self.position", "extrinsic itself \"\"\" self.target = target return self def look_from(self, position): \"\"\" Sets", "to store the tensors to Methods ------- look_at(target) sets the camera target look_from(position)", "self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device", "def device(self, value): self._device = value self.position = self.position.to(self.device) self.target = self.target.to(self.device) self.up_vector", ": list or tuple (optional) the camera target (default is (0, 0, 1))", "direction() returns the camera direction view_matrix() returns the current view matrix to(**kwargs) changes", "(default is (0, 1, 0)) device : str or torch.device (optional) the device", "look_from(position) sets the camera position direction() returns the camera direction view_matrix() returns the", "------- Tensor the (3,) direction tensor \"\"\" return self.target - self.position def view_matrix(self):", "Returns ------- Tensor a (4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x =", "Parameters ---------- position : list or tuple (optional) the camera position (default is", "itself \"\"\" self.position = position return self def direction(self): \"\"\" Returns the camera", "up vector (default is (0, 1, 0)) device : str or torch.device (optional)", "'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector", "or torch.device (optional) the device to store the tensors to (default is 'cuda:0')", "torch.tensor(position, 
dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device)", "current view matrix Returns ------- Tensor a (4,4,) view matrix \"\"\" z =", "device \"\"\" def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0),", "target tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.target = target return", "def to(self, **kwargs): \"\"\" Changes the extrinsic dtype and/or device Parameters ---------- kwargs", "self def direction(self): \"\"\" Returns the camera direction Returns ------- Tensor the (3,)", "A class representing the camera extrinsic properties Attributes ---------- position : Tensor the", "to(self, **kwargs): \"\"\" Changes the extrinsic dtype and/or device Parameters ---------- kwargs :", "\"\"\" A class representing the camera extrinsic properties Attributes ---------- position : Tensor", "1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ---------- position : list or tuple", "the camera position direction() returns the camera direction view_matrix() returns the current view", "position (default is (0, 0, 0)) target : list or tuple (optional) the", "self.up_vector = self.up_vector.to(**kwargs) return self @property def device(self): return self._device @device.setter def device(self,", ": list or tuple (optional) the camera up vector (default is (0, 1,", "-p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)), dim=0) return M def to(self,", "dtype=torch.float, device=self.device)), dim=0) return M def to(self, **kwargs): \"\"\" Changes the extrinsic dtype", "= self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property def device(self):", "from ..math.cross import * from ..math.normvec import * class CameraExtrinsic(object): \"\"\" A class", "dim=1), torch.tensor([[0, 0, 0, 1]], 
dtype=torch.float, device=self.device)), dim=0) return M def to(self, **kwargs):", "position direction() returns the camera direction view_matrix() returns the current view matrix to(**kwargs)", "(default is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float,", "list or tuple (optional) the camera target (default is (0, 0, 1)) up_vector", "the camera position (default is (0, 0, 0)) target : list or tuple", "self.target - self.position def view_matrix(self): \"\"\" Returns the current view matrix Returns -------", "dtype and/or device Parameters ---------- kwargs : ... Returns ------- CameraExtrinsic the extrinsic", "(default is (0, 0, 1)) up_vector : list or tuple (optional) the camera", "\"\"\" self.position = position return self def direction(self): \"\"\" Returns the camera direction", "camera target up_vector : Tensor the camera up vector device : str or", "position : list or tuple (optional) the camera position (default is (0, 0,", "0, 0, 1]], dtype=torch.float, device=self.device)), dim=0) return M def to(self, **kwargs): \"\"\" Changes", "a (4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y", "... 
Returns ------- CameraExtrinsic the extrinsic itself \"\"\" if 'device' in kwargs: self._device", "0), device='cuda:0'): \"\"\" Parameters ---------- position : list or tuple (optional) the camera", "the camera target look_from(position) sets the camera position direction() returns the camera direction", "target): \"\"\" Sets the camera target Parameters ---------- target : Tensor the (3,)", "camera position direction() returns the camera direction view_matrix() returns the current view matrix", "the camera position Parameters ---------- position : Tensor the (3,) position tensor Returns", "**kwargs): \"\"\" Changes the extrinsic dtype and/or device Parameters ---------- kwargs : ...", "..math.normvec import * class CameraExtrinsic(object): \"\"\" A class representing the camera extrinsic properties", "dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device", "\"\"\" Sets the camera target Parameters ---------- target : Tensor the (3,) target", "the (3,) direction tensor \"\"\" return self.target - self.position def view_matrix(self): \"\"\" Returns", "target look_from(position) sets the camera position direction() returns the camera direction view_matrix() returns", "matrix to(**kwargs) changes extrinsic dtype and/or device \"\"\" def __init__(self, position=(0, 0, 0),", "tuple (optional) the camera up vector (default is (0, 1, 0)) device :", "= normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x) p = self.position.unsqueeze(0)", "def look_at(self, target): \"\"\" Sets the camera target Parameters ---------- target : Tensor", "= self.up_vector.to(**kwargs) return self @property def device(self): return self._device @device.setter def device(self, value):", "is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target, 
dtype=torch.float, device=device)", "device=device) self._device = device def look_at(self, target): \"\"\" Sets the camera target Parameters", "tensors to Methods ------- look_at(target) sets the camera target look_from(position) sets the camera", ": list or tuple (optional) the camera position (default is (0, 0, 0))", "tensor \"\"\" return self.target - self.position def view_matrix(self): \"\"\" Returns the current view", "self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property def device(self): return self._device @device.setter def", "target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ---------- position : list", "device Parameters ---------- kwargs : ... Returns ------- CameraExtrinsic the extrinsic itself \"\"\"", "position tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.position = position return", "Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.position = position return self def", "the device to store the tensors to Methods ------- look_at(target) sets the camera", "y = cross(z, x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()),", "@device.setter def device(self, value): self._device = value self.position = self.position.to(self.device) self.target = self.target.to(self.device)", "sets the camera position direction() returns the camera direction view_matrix() returns the current", "return self @property def device(self): return self._device @device.setter def device(self, value): self._device =", "the camera extrinsic properties Attributes ---------- position : Tensor the camera position target", "the camera target up_vector : Tensor the camera up vector device : str", "extrinsic dtype and/or device \"\"\" def __init__(self, position=(0, 0, 0), target=(0, 0, 1),", "__init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters", "1]], dtype=torch.float, 
device=self.device)), dim=0) return M def to(self, **kwargs): \"\"\" Changes the extrinsic", "position target : Tensor the camera target up_vector : Tensor the camera up", "= self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]],", "\"\"\" if 'device' in kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs) self.target =", "Tensor the (3,) target tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.target", "def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\"", "the camera up vector (default is (0, 1, 0)) device : str or", "look_from(self, position): \"\"\" Sets the camera position Parameters ---------- position : Tensor the", "position): \"\"\" Sets the camera position Parameters ---------- position : Tensor the (3,)", "returns the current view matrix to(**kwargs) changes extrinsic dtype and/or device \"\"\" def", "self._device = device def look_at(self, target): \"\"\" Sets the camera target Parameters ----------", ": Tensor the camera up vector device : str or torch.device the device", "target return self def look_from(self, position): \"\"\" Sets the camera position Parameters ----------", "= torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def look_at(self, target): \"\"\" Sets the", "torch.device (optional) the device to store the tensors to (default is 'cuda:0') \"\"\"", "\"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector =", "\"\"\" Parameters ---------- position : list or tuple (optional) the camera position (default", "target : Tensor the camera target up_vector : Tensor the camera up vector", "properties Attributes ---------- position : Tensor the camera position target : Tensor the", "Parameters ---------- kwargs : ... 
Returns ------- CameraExtrinsic the extrinsic itself \"\"\" if", "to(**kwargs) changes extrinsic dtype and/or device \"\"\" def __init__(self, position=(0, 0, 0), target=(0,", ": str or torch.device (optional) the device to store the tensors to (default", "0)) target : list or tuple (optional) the camera target (default is (0,", "the current view matrix to(**kwargs) changes extrinsic dtype and/or device \"\"\" def __init__(self,", "= kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self", "up_vector : list or tuple (optional) the camera up vector (default is (0,", "z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x) p =", "str or torch.device the device to store the tensors to Methods ------- look_at(target)", "= device def look_at(self, target): \"\"\" Sets the camera target Parameters ---------- target", "CameraExtrinsic the extrinsic itself \"\"\" if 'device' in kwargs: self._device = kwargs['device'] self.position", "(optional) the camera target (default is (0, 0, 1)) up_vector : list or", "return self def look_from(self, position): \"\"\" Sets the camera position Parameters ---------- position", "= target return self def look_from(self, position): \"\"\" Sets the camera position Parameters", "device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def look_at(self, target): \"\"\"", "to Methods ------- look_at(target) sets the camera target look_from(position) sets the camera position", "---------- target : Tensor the (3,) target tensor Returns ------- CameraExtrinsic the extrinsic", "target : Tensor the (3,) target tensor Returns ------- CameraExtrinsic the extrinsic itself", "extrinsic itself \"\"\" if 'device' in kwargs: self._device = kwargs['device'] self.position = self.position.to(**kwargs)", "tuple (optional) the camera position (default is (0, 
0, 0)) target : list", "def look_from(self, position): \"\"\" Sets the camera position Parameters ---------- position : Tensor", "(0, 0, 0)) target : list or tuple (optional) the camera target (default", "the tensors to (default is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target", "target Parameters ---------- target : Tensor the (3,) target tensor Returns ------- CameraExtrinsic", "(3,) direction tensor \"\"\" return self.target - self.position def view_matrix(self): \"\"\" Returns the", "import * class CameraExtrinsic(object): \"\"\" A class representing the camera extrinsic properties Attributes", "dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def look_at(self, target):", "position : Tensor the camera position target : Tensor the camera target up_vector", "return self.target - self.position def view_matrix(self): \"\"\" Returns the current view matrix Returns", "torch.device the device to store the tensors to Methods ------- look_at(target) sets the", "- self.position def view_matrix(self): \"\"\" Returns the current view matrix Returns ------- Tensor", "---------- kwargs : ... Returns ------- CameraExtrinsic the extrinsic itself \"\"\" if 'device'", "CameraExtrinsic(object): \"\"\" A class representing the camera extrinsic properties Attributes ---------- position :", "to (default is 'cuda:0') \"\"\" self.position = torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target,", "camera position (default is (0, 0, 0)) target : list or tuple (optional)", "\"\"\" Changes the extrinsic dtype and/or device Parameters ---------- kwargs : ... 
Returns", "class representing the camera extrinsic properties Attributes ---------- position : Tensor the camera", "= torch.tensor(position, dtype=torch.float, device=device) self.target = torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float,", "Returns the current view matrix Returns ------- Tensor a (4,4,) view matrix \"\"\"", "up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ---------- position : list or tuple (optional)", "self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property def device(self): return self._device", "self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def look_at(self, target): \"\"\" Sets", "is (0, 1, 0)) device : str or torch.device (optional) the device to", "------- look_at(target) sets the camera target look_from(position) sets the camera position direction() returns", "def device(self): return self._device @device.setter def device(self, value): self._device = value self.position =", "return M def to(self, **kwargs): \"\"\" Changes the extrinsic dtype and/or device Parameters", "camera up vector device : str or torch.device the device to store the", "Tensor the (3,) direction tensor \"\"\" return self.target - self.position def view_matrix(self): \"\"\"", "Sets the camera target Parameters ---------- target : Tensor the (3,) target tensor", "normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(),", "0, 0)) target : list or tuple (optional) the camera target (default is", "returns the camera direction view_matrix() returns the current view matrix to(**kwargs) changes extrinsic", "str or torch.device (optional) the device to store the tensors to (default is", "dtype=torch.float, device=device) self._device = device def look_at(self, target): \"\"\" Sets the camera target", "Changes the extrinsic dtype 
and/or device Parameters ---------- kwargs : ... Returns -------", "itself \"\"\" self.target = target return self def look_from(self, position): \"\"\" Sets the", "the current view matrix Returns ------- Tensor a (4,4,) view matrix \"\"\" z", "---------- position : Tensor the camera position target : Tensor the camera target", "direction view_matrix() returns the current view matrix to(**kwargs) changes extrinsic dtype and/or device", "or torch.device the device to store the tensors to Methods ------- look_at(target) sets", "Sets the camera position Parameters ---------- position : Tensor the (3,) position tensor", "torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)), dim=0) return M def to(self, **kwargs): \"\"\"", "extrinsic dtype and/or device Parameters ---------- kwargs : ... Returns ------- CameraExtrinsic the", "Tensor the camera target up_vector : Tensor the camera up vector device :", "the camera target (default is (0, 0, 1)) up_vector : list or tuple", "Attributes ---------- position : Tensor the camera position target : Tensor the camera", "the camera direction view_matrix() returns the current view matrix to(**kwargs) changes extrinsic dtype", "vector (default is (0, 1, 0)) device : str or torch.device (optional) the", "torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def look_at(self,", "store the tensors to Methods ------- look_at(target) sets the camera target look_from(position) sets", "\"\"\" Returns the camera direction Returns ------- Tensor the (3,) direction tensor \"\"\"", "= position return self def direction(self): \"\"\" Returns the camera direction Returns -------", "\"\"\" Returns the current view matrix Returns ------- Tensor a (4,4,) view matrix", "z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)), dim=0) return M def", "self.target = target return self def look_from(self, 
position): \"\"\" Sets the camera position", "view matrix Returns ------- Tensor a (4,4,) view matrix \"\"\" z = normr(self.direction().unsqueeze(0))", "view_matrix() returns the current view matrix to(**kwargs) changes extrinsic dtype and/or device \"\"\"", "* from ..math.normvec import * class CameraExtrinsic(object): \"\"\" A class representing the camera", ": Tensor the (3,) position tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\"", "the extrinsic itself \"\"\" if 'device' in kwargs: self._device = kwargs['device'] self.position =", "kwargs['device'] self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property", "and/or device \"\"\" def __init__(self, position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1,", "\"\"\" self.target = target return self def look_from(self, position): \"\"\" Sets the camera", "is (0, 0, 0)) target : list or tuple (optional) the camera target", "x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0,", "direction Returns ------- Tensor the (3,) direction tensor \"\"\" return self.target - self.position", "------- CameraExtrinsic the extrinsic itself \"\"\" self.position = position return self def direction(self):", "(optional) the camera up vector (default is (0, 1, 0)) device : str", "z)) y = cross(z, x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(),", "import torch from ..math.cross import * from ..math.normvec import * class CameraExtrinsic(object): \"\"\"", "list or tuple (optional) the camera up vector (default is (0, 1, 0))", "return self def direction(self): \"\"\" Returns the camera direction Returns ------- Tensor the", "camera target Parameters ---------- target : Tensor the (3,) target tensor Returns -------", "= torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float, 
device=self.device)), dim=0)", "torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def look_at(self, target): \"\"\" Sets the camera", "self def look_from(self, position): \"\"\" Sets the camera position Parameters ---------- position :", "view_matrix(self): \"\"\" Returns the current view matrix Returns ------- Tensor a (4,4,) view", "M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float, device=self.device)),", "(3,) target tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.target = target", "tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.target = target return self", "list or tuple (optional) the camera position (default is (0, 0, 0)) target", "target : list or tuple (optional) the camera target (default is (0, 0,", "Returns the camera direction Returns ------- Tensor the (3,) direction tensor \"\"\" return", "CameraExtrinsic the extrinsic itself \"\"\" self.target = target return self def look_from(self, position):", "M def to(self, **kwargs): \"\"\" Changes the extrinsic dtype and/or device Parameters ----------", "target (default is (0, 0, 1)) up_vector : list or tuple (optional) the", "Returns ------- Tensor the (3,) direction tensor \"\"\" return self.target - self.position def", "x = normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x) p = self.position.unsqueeze(0) M =", "device=self.device)), dim=0) return M def to(self, **kwargs): \"\"\" Changes the extrinsic dtype and/or", "Tensor the (3,) position tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.position", "matrix \"\"\" z = normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x)", "up_vector : Tensor the camera up vector device : str or torch.device the", "position=(0, 0, 0), target=(0, 0, 1), up_vector=(0, 1, 0), device='cuda:0'): \"\"\" Parameters ----------", "Returns ------- CameraExtrinsic 
the extrinsic itself \"\"\" self.target = target return self def", "1)) up_vector : list or tuple (optional) the camera up vector (default is", ": Tensor the camera position target : Tensor the camera target up_vector :", "value): self._device = value self.position = self.position.to(self.device) self.target = self.target.to(self.device) self.up_vector = self.up_vector.to(self.device)", "= cross(z, x) p = self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1),", "dim=0) return M def to(self, **kwargs): \"\"\" Changes the extrinsic dtype and/or device", "---------- position : list or tuple (optional) the camera position (default is (0,", "sets the camera target look_from(position) sets the camera position direction() returns the camera", "normr(self.direction().unsqueeze(0)) x = normr(cross(self.up_vector.unsqueeze(0), z)) y = cross(z, x) p = self.position.unsqueeze(0) M", "camera direction Returns ------- Tensor the (3,) direction tensor \"\"\" return self.target -", "the camera up vector device : str or torch.device the device to store", "* class CameraExtrinsic(object): \"\"\" A class representing the camera extrinsic properties Attributes ----------", "the (3,) target tensor Returns ------- CameraExtrinsic the extrinsic itself \"\"\" self.target =", "self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property def device(self): return", "position : Tensor the (3,) position tensor Returns ------- CameraExtrinsic the extrinsic itself", "self.position = self.position.to(**kwargs) self.target = self.target.to(**kwargs) self.up_vector = self.up_vector.to(**kwargs) return self @property def", "the camera target Parameters ---------- target : Tensor the (3,) target tensor Returns", "device='cuda:0'): \"\"\" Parameters ---------- position : list or tuple (optional) the camera position", "device def look_at(self, target): \"\"\" Sets the camera target Parameters 
---------- target :", "= torch.tensor(target, dtype=torch.float, device=device) self.up_vector = torch.tensor(up_vector, dtype=torch.float, device=device) self._device = device def", "import * from ..math.normvec import * class CameraExtrinsic(object): \"\"\" A class representing the", "self.position.unsqueeze(0) M = torch.cat((torch.cat((x.t(), y.t(), z.t(), -p.t()), dim=1), torch.tensor([[0, 0, 0, 1]], dtype=torch.float,", "the extrinsic itself \"\"\" self.target = target return self def look_from(self, position): \"\"\"", "self.up_vector.to(**kwargs) return self @property def device(self): return self._device @device.setter def device(self, value): self._device", "@property def device(self): return self._device @device.setter def device(self, value): self._device = value self.position", "class CameraExtrinsic(object): \"\"\" A class representing the camera extrinsic properties Attributes ---------- position", "device to store the tensors to Methods ------- look_at(target) sets the camera target", "target up_vector : Tensor the camera up vector device : str or torch.device" ]
[ "= feature var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880',", "updateChart function with this selected option update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id':", "color: #2A3F5E; } .selection { margin-bottom: 20px; } .dot { stroke: #fff; opacity:", "sans-serif; color: #2A3F5E } .chart { background-color: #E5ECF6; display: relative; } .axis path,", "repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options", "make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } // add grid lines // add the", ".attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width /", "var tipMouseout = d => { tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); //", "create the continuous (numerical) legend var legend = d3.select(\"#\" + legend_id).html(\"\") var width", "\"label\") .attr(\"x\", width / 2) .attr(\"y\", height + 35) .style(\"text-anchor\", \"end\") .text(xCat); //", "crispEdges; } .grid path { stroke-width: 0; } .tooltip { position: absolute; font-size:", "its position/contents are defined during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\",", "chart with the new color coding scheme function update(feature) { colorFeature = feature", "d => { let dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } }", "right\"> <span> Color by feature: </span> </label> <select id=$select_id> $options </select> </div> <div", "via url, we can eliminate this define here define($chart_id, ['d3'], function(d3) { return", "function update(feature) { colorFeature = feature var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA',", "has been chosen var selectedOption 
= d3.select(this).property(\"value\") // run the updateChart function with", "legend_id).attr(\"y\", 0); // tooltip mouseover event handler var tipMouseover = d => {", "legend = d3.select(\"#\" + legend_id).html(\"\") // clear current legend content legend.append(\"text\") .attr(\"x\", 15)", "\"14px\") var textHeight = 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\",", ".tickSize(-height) .tickFormat(\"\") ) // add the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width)", "var textHeight = 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\");", "{} # map each feature to type num_feature_ranges = {} for x in", "- 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines", "update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return", "path, .axis line { fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label {", "class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\"> <span> Color by feature:", "gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the datapoints var", "var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the legend container", ".attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } //", "// gridlines in x axis function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) }", "featureColors[feature]) 
legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\")", "#E5ECF6; display: relative; } .axis path, .axis line { fill: none; stroke: #2A3F5E;", "and its position/contents are defined during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\")", "{top: 40, right: 10, bottom: 50, left: 50}, width = 650 - margin.left", "from string import Template import numpy as np # function to initialize a", "tipMouseover = d => { // x and y numeric labels let html", "option that has been chosen var selectedOption = d3.select(this).property(\"value\") // run the updateChart", ".text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event handler var tipMouseout", "\"x axis\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\",", "width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and label", "15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight = 1; var linearGradient =", "function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) { var initialFeature =", "margin.bottom; // append the svg object to the body of the page var", "{ margin-bottom: 20px; } .dot { stroke: #fff; opacity: 0.8; } .grid line", "+ margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top", "x axis function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } // gridlines in", ".range([height, 0]); // Add X-axis and label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\",", "height + 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and label svg .append('g')", "d3.select(\"#\" + legend_id).html(\"\") var width = 30, height = 300; // add 
legend", "the page var svg = d3.select('#' + figure_id) .attr(\"width\", width + margin.left +", "legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i", ".call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\",", "d => { tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); // don't care about", "map } else { let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\",", "= d3.select(this).property(\"value\") // run the updateChart function with this selected option update(selectedOption) });", ".append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\",", "var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i <= 100; i", ".legend { background-color: white; position: absolute; left: 650px; top: 20px; width: auto; height:", "right: 10, bottom: 50, left: 50}, width = 650 - margin.left - margin.right,", "feature_types = {} # map each feature to type num_feature_ranges = {} for", "to get the right orientation of gradient const legendScale = num => {", "gridlines in y axis function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } //", ".attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i", "textwrap import dedent from IPython.core.display import display, HTML from string import Template import", "d => { // x and y numeric labels let html = xCat", "legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i =", "{} for x in features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain = 
[min(data[x].dropna()),", "return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover", "Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the datapoints", "#2A3F5E; } .selection { margin-bottom: 20px; } .dot { stroke: #fff; opacity: 0.8;", "\"end\") .text(yCat); // gridlines in x axis function function make_x_gridlines() { return d3.axisBottom(x)", "updateLegendCat(featureColors) // update the legend with the new color map } else {", ".attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 +", "'#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let type =", "the body of the page var svg = d3.select('#' + figure_id) .attr(\"width\", width", "figure_id) .attr(\"width\", width + margin.left + margin.right) .attr(\"height\", height + margin.top + margin.bottom)", "function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } // add grid lines //", "tooltip container to the body container // it's invisible and its position/contents are", "function updateLegendNum(domain) { // create the continuous (numerical) legend var legend = d3.select(\"#\"", "\"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event handler var tipMouseout = d", "<svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) {", "</script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id':", "'#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let type", "\",\" + margin.top + \")\"); // X and Y scales and Axis var", "svg = 
d3.select('#' + figure_id) .attr(\"width\", width + margin.left + margin.right) .attr(\"height\", height", "d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY - 15) +", "var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and label svg", "id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id,", "- margin.right, height = 400 - margin.top - margin.bottom; // append the svg", "x in features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if", "='{x}'>{x}</option>\" for x in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = {", "'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat), 'y_cat': repr(y_cat), 'size_cat': repr(size_cat), 'options': repr(features_html_options) })))", "40, right: 10, bottom: 50, left: 50}, width = 650 - margin.left -", "x in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\",", ".attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x axis function function make_x_gridlines()", "{ fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label { color: #2A3F5E; }", "tooltip mouseover event handler var tipMouseover = d => { // x and", "+ \": \" + Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \": \" +", "+ \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label html +=", "import numpy as np # function to initialize a scatter plot def init_chart(data,features):", "stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid path { stroke-width: 0; }", "select_id, data, xCat, yCat, sizeCat, axes) { var initialFeature = d3.select(\"#\" + select_id).property(\"value\")", 
"xCat, yCat, sizeCat, axes) { var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin", "var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height,", "</style> <script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure", "the legend with the new color map } else { let feature_domain =", "var dots = svg .selectAll() .data(data) .enter() .append(\"circle\") // Add the tooltip container", "+ margin.left + margin.right) .attr(\"height\", height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\"", "function that update the chart with the new color coding scheme function update(feature)", "selectedOption = d3.select(this).property(\"value\") // run the updateChart function with this selected option update(selectedOption)", "+ margin.left + \",\" + margin.top + \")\"); // X and Y scales", "size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label", "= color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d) {", "= 30, height = 300; // add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\",", ".transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) { // create the categorical legend var", ".attr(\"height\", height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\"", "legend var legend = d3.select(\"#\" + legend_id).html(\"\") var width = 30, height =", "line { fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label { color: #2A3F5E;", "'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' +", ".style(\"font-size\", 
\"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) } function updateLegendNum(domain) { // create", "Y scales and Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y", "grid lines // add the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" +", "dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat]) ) .attr(\"r\",", "var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100])", "= d => { // x and y numeric labels let html =", "tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\")", "\"categorical\") { color = d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\", d =>", "selected option update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges':", "= \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in features ]) dict_data = data.replace(np.nan,", "title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight = 1;", "legend_id).html(\"\") // clear current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\",", "style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id],", "margin.right) .attr(\"height\", height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left +", "during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the", ") .attr(\"cy\", d => y(d[yCat]) ) 
.attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\",", "repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([", "select_id).property(\"value\") var margin = {top: 40, right: 10, bottom: 50, left: 50}, width", "var margin = {top: 40, right: 10, bottom: 50, left: 50}, width =", "add grid lines // add the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\"", "tooltip mouseout event handler var tipMouseout = d => { tooltip.transition() .duration(0) //", "width: auto; height: auto; pointer-events: none; background-color: white; padding: 5px; } .legend {", "text-align: right\"> <span> Color by feature: </span> </label> <select id=$select_id> $options </select> </div>", "} .legend { background-color: white; position: absolute; left: 650px; top: 20px; width: auto;", "i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i)); // to get", "// add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var", "0; } .tooltip { position: absolute; font-size: 12px; width: auto; height: auto; pointer-events:", "+= 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i))", ".on(\"mouseout\", tipMouseout) update(initialFeature) // A function that update the chart with the new", ".attr(\"fill\", d => { let dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain) }", "svg object to the body of the page var svg = d3.select('#' +", "# function to initialize a scatter plot def init_chart(data,features): chart_id = 'mychart-' +", "+ \")\"); // X and Y scales and Axis var x = d3.scaleLinear()", "each feature to type num_feature_ranges = {} 
for x in features: if data[x].dtype", "X and Y scales and Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]);", "axis\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width", "str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat), 'y_cat': repr(y_cat),", "y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and label svg .append('g')", "tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the legend container to", "the svg object to the body of the page var svg = d3.select('#'", "\"<br><br>\" // color feature label html += colorFeature + \": \" + d[colorFeature]", "legend_id, select_id, data, xCat, yCat, sizeCat, axes) { var initialFeature = d3.select(\"#\" +", "feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif; color:", "d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and label svg .append('g') .attr(\"class\", \"x", "= d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30)", "0.8; } .grid line { stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges;", "<= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i)); //", "to initialize a scatter plot def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4()) feature_types", "} .label { color: #2A3F5E; } .selection { margin-bottom: 20px; } .dot {", "eliminate this define here define($chart_id, ['d3'], function(d3) { return function (figure_id, legend_id, select_id,", "d => x(d[xCat]) ) .attr(\"cy\", d => 
y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]]))", "'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat':", "(d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\",", "id=$select_id> $options </select> </div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id", "let type = $feature_types[feature]; if (type === \"categorical\") { color = d3.scaleOrdinal(colors); let", "margin.left + \",\" + margin.top + \")\"); // X and Y scales and", ".style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0; i <= 5; i += 1)", "to the body of the page var svg = d3.select('#' + figure_id) .attr(\"width\",", "// add grid lines // add the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\",", ".append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\", -30)", "i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4)", "colorFeature + \": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\")", "{ 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure mychart via url, we", "height = 300; // add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature)", "categorical legend var legend = d3.select(\"#\" + legend_id).html(\"\") // clear current legend content", "featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors) // update the legend with the", "0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid path { stroke-width: 0; } .tooltip", "\"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the datapoints var dots = svg", 
"stroke-width: 2px; shape-rendering: crispEdges; } .grid path { stroke-width: 0; } .tooltip {", "(figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) { var initialFeature = d3.select(\"#\"", "{ // x and y numeric labels let html = xCat + \":", ".domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis", "the categorical legend var legend = d3.select(\"#\" + legend_id).html(\"\") // clear current legend", "sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A function that update the chart", "update(feature) { colorFeature = feature var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A',", "[min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align:", "{ return d3.axisLeft(y) .ticks(5) } // add grid lines // add the X", "= 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\",", ".style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\") .transition()", "function(d) { // recover the option that has been chosen var selectedOption =", "$feature_types[feature]; if (type === \"categorical\") { color = d3.scaleOrdinal(colors); let featureColors = {}", "'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure mychart via url, we can eliminate", "for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\"> <span> Color by feature: </span> </label>", "select_id).on(\"change\", function(d) { // recover the option that has been chosen var selectedOption", "clear current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let", 
".attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); }", "num_feature_ranges = {} for x in features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain", "40) .attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1", ".text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight = 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\",", ".duration(0) // ms .style(\"opacity\", 0); // don't care about position! }; var sizeScale", "650 - margin.left - margin.right, height = 400 - margin.top - margin.bottom; //", "+ ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event handler", "to type num_feature_ranges = {} for x in features: if data[x].dtype in [\"int64\",", "container // it's invisible and its position/contents are defined during mouseover var tooltip", "10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight = 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\")", "num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body {", "the option that has been chosen var selectedOption = d3.select(this).property(\"value\") // run the", "legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\",", "paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure mychart via url,", "return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) 
.attr(\"height\", height) .style(\"fill\",", "update(initialFeature) // A function that update the chart with the new color coding", "id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat,", "{ let dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" +", "function(d3) { return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) {", "15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature => {", "30, height = 300; // add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10)", "i = 0; i <= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i +", "= d3.select(\"#\" + legend_id).html(\"\") var width = 30, height = 300; // add", "ms .style(\"opacity\", 0); // don't care about position! }; var sizeScale = d3.scaleLinear()", "<style> body { font: 11px sans-serif; color: #2A3F5E } .chart { background-color: #E5ECF6;", "color map } else { let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots", "display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\"> <span> Color", "{ color: #2A3F5E; } .selection { margin-bottom: 20px; } .dot { stroke: #fff;", "</script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id", "import Template import numpy as np # function to initialize a scatter plot", "Template import numpy as np # function to initialize a scatter plot def", "textHeight = 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") 
.attr(\"gradientTransform\", \"rotate(90)\"); var", "legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i", "been chosen var selectedOption = d3.select(this).property(\"value\") // run the updateChart function with this", "20px; } .dot { stroke: #fff; opacity: 0.8; } .grid line { stroke:", "svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\",", "\": \" + Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \": \" + Number((d[yCat]).toFixed(3))", "if data[x].dtype in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]:", "<script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes )", "}) } function updateLegendNum(domain) { // create the continuous (numerical) legend var legend", "+ 10) + \"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\", .9)", "var legend = d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip mouseover event handler var", "\"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) { // create the categorical legend", "define here define($chart_id, ['d3'], function(d3) { return function (figure_id, legend_id, select_id, data, xCat,", ".range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat]) )", "xCat + \": \" + Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \": \"", "the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the", ".attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the datapoints var dots =", "\"categorical\" 
display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif; color: #2A3F5E }", "\")\"); // X and Y scales and Axis var x = d3.scaleLinear() .domain(axes[\"x\"])", "{} dots .attr(\"fill\", d => { let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor", "legendScale = num => { var scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return", "datapoints var dots = svg .selectAll() .data(data) .enter() .append(\"circle\") // Add the tooltip", ".attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30", "d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A function that update", ".attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i)); // to get the right orientation of", "position: absolute; left: 650px; top: 20px; width: auto; height: 500px; } </style> <script>", "none; background-color: white; padding: 5px; } .legend { background-color: white; position: absolute; left:", "data[x].dtype in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x]", "require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes ) })", "it's invisible and its position/contents are defined during mouseover var tooltip = d3.select(\"body\").append(\"div\")", "width: 240px; text-align: right\"> <span> Color by feature: </span> </label> <select id=$select_id> $options", "continuous (numerical) legend var legend = d3.select(\"#\" + legend_id).html(\"\") var width = 30,", "5px; } .legend { background-color: white; position: absolute; left: 650px; top: 20px; width:", "linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", 
color(100-i)); // to get the right orientation", "feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x]", "dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover the", ".ticks(5) } // add grid lines // add the X gridlines svg.append(\"g\") .attr(\"class\",", ".attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x axis function", "to the body container var legend = d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip", "} .chart { background-color: #E5ECF6; display: relative; } .axis path, .axis line {", "\"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i <= 100;", "i <= 5; i += 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 +", ".grid line { stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid", "55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } }", "max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\">", "$axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-'", "\"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width:", "color; let type = $feature_types[feature]; if (type === \"categorical\") { color = d3.scaleOrdinal(colors);", "=> y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) 
update(initialFeature) //", "configure mychart via url, we can eliminate this define here define($chart_id, ['d3'], function(d3)", "handler var tipMouseover = d => { // x and y numeric labels", ".tooltip { position: absolute; font-size: 12px; width: auto; height: auto; pointer-events: none; background-color:", "color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i <= 100; i +=", "dots .attr(\"fill\", d => { let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return", "'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options =", "+ height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the Y gridlines", "are defined during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); //", "15) + \"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) { // create the", "- margin.top - margin.bottom; // append the svg object to the body of", "10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20)", "the continuous (numerical) legend var legend = d3.select(\"#\" + legend_id).html(\"\") var width =", "padding: 5px; } .legend { background-color: white; position: absolute; left: 650px; top: 20px;", "for x in features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())]", "the new color map } else { let feature_domain = $num_feature_ranges[feature] color =", "+ \",\" + margin.top + \")\"); // X and Y scales and Axis", "font-size: 12px; width: auto; height: auto; pointer-events: none; background-color: white; padding: 5px; }", "html = xCat + \": \" + Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat +", 
".call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\", height + 35) .style(\"text-anchor\",", "create the categorical legend var legend = d3.select(\"#\" + legend_id).html(\"\") // clear current", "+ Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\"", "relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart)", "// If we configure mychart via url, we can eliminate this define here", "from textwrap import dedent from IPython.core.display import display, HTML from string import Template", "yCat + \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label html", "30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0; i", "</select> </div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div>", "# map each feature to type num_feature_ranges = {} for x in features:", ".text(yCat); // gridlines in x axis function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5)", "labels let html = xCat + \": \" + Number((d[xCat]).toFixed(3)) + \"<br>\" +", "'#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let type = $feature_types[feature]; if (type ===", "d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover the option that has been chosen", "updateLegendNum(domain) { // create the continuous (numerical) legend var legend = d3.select(\"#\" +", "''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-'", "// A function that update the chart with the new color coding scheme", "=> { var scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return 
Number((scale(num))).toFixed(0) } legend.append(\"rect\")", "margin.left + margin.right) .attr(\"height\", height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" +", "{ mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes ) }) </script> ''')).substitute({", "+ height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\",", "dedent from IPython.core.display import display, HTML from string import Template import numpy as", "str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes),", "return d3.axisBottom(x) .ticks(5) } // gridlines in y axis function function make_y_gridlines() {", "mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id':", "value ='{x}'>{x}</option>\" for x in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat =", "5; i += 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 +", "var selectedOption = d3.select(this).property(\"value\") // run the updateChart function with this selected option", "If we configure mychart via url, we can eliminate this define here define($chart_id,", "height = 400 - margin.top - margin.bottom; // append the svg object to", ".attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\")", "{ return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) { var", "+ figure_id) .attr(\"width\", width + margin.left + margin.right) .attr(\"height\", height + margin.top +", "// it's invisible and its position/contents are defined during mouseover var tooltip =", "page var svg = d3.select('#' + figure_id) .attr(\"width\", width + margin.left + margin.right)", 
"plot def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4()) feature_types = {} # map", "let i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\",", ".attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A function that", "0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature])", "the legend container to the body container var legend = d3.select(\"#\" + legend_id).attr(\"y\",", "35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\")", "1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color =", "color = d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\", d => { let", "50}, width = 650 - margin.left - margin.right, height = 400 - margin.top", ".dot { stroke: #fff; opacity: 0.8; } .grid line { stroke: #fff; stroke-opacity:", ".attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event handler var tipMouseout = d =>", "</label> <select id=$select_id> $options </select> </div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div", "dotColor }) updateLegendCat(featureColors) // update the legend with the new color map }", "axis function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } // add grid lines", "var legend = d3.select(\"#\" + legend_id).html(\"\") var width = 30, height = 300;", "\"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else:", "2px; shape-rendering: crispEdges; } .grid path { stroke-width: 
0; } .tooltip { position:", "<script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure mychart", "+ \"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) { // create the categorical", "label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2 )", "(type === \"categorical\") { color = d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\",", "init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in features ]) dict_data", "30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) }", "\"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors)", "var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']", "=> { tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); // don't care about position!", "$data, $x_cat, $y_cat, $size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-'", "5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i)); // to get the right", "tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); // don't care about position! 
}; var", ".style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x axis function function make_x_gridlines() { return", "d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\", d => { let dotColor =", "stroke: #2A3F5E; shape-rendering: crispEdges; } .label { color: #2A3F5E; } .selection { margin-bottom:", "legend_id).html(\"\") var width = 30, height = 300; // add legend title legend.append(\"text\")", "repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())),", "feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d => { let dotColor", ".call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines()", "var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin = {top: 40, right: 10,", "d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip mouseover event handler var tipMouseover = d", "+ margin.right) .attr(\"height\", height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left", "}) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features):", "append the svg object to the body of the page var svg =", "{ colorFeature = feature var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3',", "+ str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat), 'y_cat':", "Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" //", "} .axis path, .axis line { fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; }", "var 
legend = d3.select(\"#\" + legend_id).html(\"\") // clear current legend content legend.append(\"text\") .attr(\"x\",", "Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\");", ".attr(\"cy\", d => y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout)", "absolute; left: 650px; top: 20px; width: auto; height: 500px; } </style> <script> require.config({", "opacity: 0.8; } .grid line { stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering:", "1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\",", "10, bottom: 50, left: 50}, width = 650 - margin.left - margin.right, height", "} // add grid lines // add the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\")", "d3.axisBottom(x) .ticks(5) } // gridlines in y axis function function make_y_gridlines() { return", "stroke: #fff; opacity: 0.8; } .grid line { stroke: #fff; stroke-opacity: 0.7; stroke-width:", "new color coding scheme function update(feature) { colorFeature = feature var colors =", ".tickFormat(\"\") ) // add the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\")", "x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]);", "and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2", "numpy as np # function to initialize a scatter plot def init_chart(data,features): chart_id", "d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\",", "500px; } </style> <script> 
require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If", ".tickFormat(\"\") ) // Add the datapoints var dots = svg .selectAll() .data(data) .enter()", "dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d)", ".attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i ) .text(feature)", "// recover the option that has been chosen var selectedOption = d3.select(this).property(\"value\") //", "'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data':", "none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label { color: #2A3F5E; } .selection {", "= {} for x in features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain =", "features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] ==", "+ 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) } function", "= ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color;", "mouseout event handler var tipMouseout = d => { tooltip.transition() .duration(0) // ms", "{ let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors) //", "for x in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\":", "= feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font:", "font: 11px sans-serif; color: #2A3F5E } .chart { background-color: #E5ECF6; display: relative; }", "const legendScale = num => { var scale = 
d3.scaleLinear() .domain([5, 0]) .range(domain)", "that update the chart with the new color coding scheme function update(feature) {", "else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif;", "\"translate(\" + margin.left + \",\" + margin.top + \")\"); // X and Y", "and Y scales and Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var", "= d3.select(\"#\" + legend_id).html(\"\") // clear current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\",", "+ legend_id).html(\"\") // clear current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature)", ".chart { background-color: #E5ECF6; display: relative; } .axis path, .axis line { fill:", "0); // don't care about position! }; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7])", ".attr(\"alignment-baseline\",\"middle\") i += 1 }) } function updateLegendNum(domain) { // create the continuous", ".attr(\"width\", width + margin.left + margin.right) .attr(\"height\", height + margin.top + margin.bottom) .append(\"g\")", "X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines() .tickSize(-height)", "else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>'))", "{ stroke: #fff; opacity: 0.8; } .grid line { stroke: #fff; stroke-opacity: 0.7;", "= 650 - margin.left - margin.right, height = 400 - margin.top - margin.bottom;", "return dotColor }) updateLegendCat(featureColors) // update the legend with the new color map", "= d3.select('#' + figure_id) .attr(\"width\", width + margin.left + margin.right) .attr(\"height\", height +", "legend = d3.select(\"#\" + 
legend_id).attr(\"y\", 0); // tooltip mouseover event handler var tipMouseover", "} .grid path { stroke-width: 0; } .tooltip { position: absolute; font-size: 12px;", "'#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let type = $feature_types[feature]; if", ".style(\"opacity\", 0); // Add the legend container to the body container var legend", "width + margin.left + margin.right) .attr(\"height\", height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\",", "2) .attr(\"y\", height + 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and label", "left: 650px; top: 20px; width: auto; height: 500px; } </style> <script> require.config({ paths:", "Color by feature: </span> </label> <select id=$select_id> $options </select> </div> <div style=\"position: relative\">", "container to the body container // it's invisible and its position/contents are defined", "body container // it's invisible and its position/contents are defined during mouseover var", "+ select_id).on(\"change\", function(d) { // recover the option that has been chosen var", "= \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif; color: #2A3F5E", "legend with the new color map } else { let feature_domain = $num_feature_ranges[feature]", "height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat,", "chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in features", "\"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\")", "} </style> <script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we", "// add the X gridlines svg.append(\"g\") 
.attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height +", "make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } // gridlines in y axis function function", "{ legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\")", ".enter() .append(\"circle\") // Add the tooltip container to the body container // it's", "\" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY -", "margin.right, height = 400 - margin.top - margin.bottom; // append the svg object", ".attr(\"x\", width / 2) .attr(\"y\", height + 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add", "// Add the tooltip container to the body container // it's invisible and", ".attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0; i <= 5; i", "= 0; i <= 5; i += 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\",", "300; // add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\")", "np # function to initialize a scatter plot def init_chart(data,features): chart_id = 'mychart-'", "+ margin.top + \")\"); // X and Y scales and Axis var x", "650px; top: 20px; width: auto; height: 500px; } </style> <script> require.config({ paths: {", "(let i = 0; i <= 5; i += 1) { legend.append(\"text\") .attr(\"x\",", "} // tooltip mouseout event handler var tipMouseout = d => { tooltip.transition()", "def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4()) feature_types = {} # map each", "height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the Y gridlines svg.append(\"g\")", "content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0", ".on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) 
// A function that update the chart with", "repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option value", "return d3.axisLeft(y) .ticks(5) } // add grid lines // add the X gridlines", "+ 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i", "{ font: 11px sans-serif; color: #2A3F5E } .chart { background-color: #E5ECF6; display: relative;", "with this selected option update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types':", ".style(\"opacity\", .9) }; function updateLegendCat(featureColors) { // create the categorical legend var legend", "by feature: </span> </label> <select id=$select_id> $options </select> </div> <div style=\"position: relative\"> <svg", "= data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style>", "we can eliminate this define here define($chart_id, ['d3'], function(d3) { return function (figure_id,", "repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat), 'y_cat': repr(y_cat), 'size_cat': repr(size_cat),", "= d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); //", "// Add the legend container to the body container var legend = d3.select(\"#\"", ".style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event handler var tipMouseout =", "+ \"<br><br>\" // color feature label html += colorFeature + \": \" +", "about position! 
}; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d", "string import Template import numpy as np # function to initialize a scatter", "src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif; color: #2A3F5E } .chart {", "d3.axisLeft(y) .ticks(5) } // add grid lines // add the X gridlines svg.append(\"g\")", "\"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i", "in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] =", "-(height - 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); //", "\"<br>\" + yCat + \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature", "} .dot { stroke: #fff; opacity: 0.8; } .grid line { stroke: #fff;", "=> x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\",", "for (let i = 0; i <= 5; i += 1) { legend.append(\"text\")", "d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d =>", "margin.top + \")\"); // X and Y scales and Axis var x =", "(let i = 0; i <= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i", ".style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\")", "this selected option update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types),", "''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def 
scatter_plot(data,x_cat,y_cat,axes,features): chart_id =", "from IPython.core.display import display, HTML from string import Template import numpy as np", "Y-axis and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/", ".attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0;", "0); // Add the legend container to the body container var legend =", "invisible and its position/contents are defined during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\",", "feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x]", "d3.select(\"#\" + select_id).property(\"value\") var margin = {top: 40, right: 10, bottom: 50, left:", "= 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color", "['d3'], function(d3) { return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes)", "= $feature_types[feature]; if (type === \"categorical\") { color = d3.scaleOrdinal(colors); let featureColors =", "handler var tipMouseout = d => { tooltip.transition() .duration(0) // ms .style(\"opacity\", 0);", "svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\")", "legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\");", "// Add Y-axis and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height", "=> { let dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\"", "]) dict_data 
= data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])]", "scheme function update(feature) { colorFeature = feature var colors = ['#636EFA', '#EF553B', '#00CC96',", "the new color coding scheme function update(feature) { colorFeature = feature var colors", ".attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0;", ".attr(\"x\", 40) .attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i +=", "else { let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d =>", "{ stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid path {", "function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes ) }) </script>", "let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d => { let", "top: 20px; width: auto; height: 500px; } </style> <script> require.config({ paths: { 'd3':", ".append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\"); // X", "let featureColors = {} dots .attr(\"fill\", d => { let dotColor = color(d[feature])", ".grid path { stroke-width: 0; } .tooltip { position: absolute; font-size: 12px; width:", "+= colorFeature + \": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) +", "background-color: #E5ECF6; display: relative; } .axis path, .axis line { fill: none; stroke:", "0); // tooltip mouseover event handler var tipMouseover = d => { //", "// tooltip mouseover event handler var tipMouseover = d => { // x", "// update the legend with the new color map } else { let", ") .attr(\"r\", d => 
sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A function", "}) updateLegendCat(featureColors) // update the legend with the new color map } else", "colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var", "// create the categorical legend var legend = d3.select(\"#\" + legend_id).html(\"\") // clear", "in y axis function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } // add", "\"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2)", "0; i <= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\",", "+ str(uuid.uuid4()) feature_types = {} # map each feature to type num_feature_ranges =", "// gridlines in y axis function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) }", "dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors) // update the", "for (let i = 0; i <= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\",", "'#FECB52'] var color; let type = $feature_types[feature]; if (type === \"categorical\") { color", "the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines()", ".attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 })", "{ background-color: #E5ECF6; display: relative; } .axis path, .axis line { fill: none;", "= xCat + \": \" + Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \":", "} } // tooltip mouseout event handler var tipMouseout = d => {", "}) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())),", "}); } }) </script> ''')).substitute({ 
'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id", "y axis function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } // add grid", "= {} # map each feature to type num_feature_ranges = {} for x", "height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0; i <= 5; i +=", "= 400 - margin.top - margin.bottom; // append the svg object to the", "tipMouseout = d => { tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); // don't", "the updateChart function with this selected option update(selectedOption) }); } }) </script> ''')).substitute({", "right orientation of gradient const legendScale = num => { var scale =", "uuid from textwrap import dedent from IPython.core.display import display, HTML from string import", "add the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add", "#2A3F5E; shape-rendering: crispEdges; } .label { color: #2A3F5E; } .selection { margin-bottom: 20px;", "['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let", "} // gridlines in y axis function function make_y_gridlines() { return d3.axisLeft(y) .ticks(5)", "=> { let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors)", "color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d => { let dotColor = color(d[feature]) return", "yCat, sizeCat, axes) { var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin =", "chosen var selectedOption = d3.select(this).property(\"value\") // run the updateChart function with this selected", "+ str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat), 'y_cat': repr(y_cat), 'size_cat': repr(size_cat), 'options':", "</span> </label> <select 
id=$select_id> $options </select> </div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg>", "shape-rendering: crispEdges; } .label { color: #2A3F5E; } .selection { margin-bottom: 20px; }", "display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif; color: #2A3F5E } .chart { background-color:", "display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px sans-serif; color: #2A3F5E } .chart", ") .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) } function updateLegendNum(domain) {", "// don't care about position! }; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\",", "function to initialize a scatter plot def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4())", "+= 1 }) } function updateLegendNum(domain) { // create the continuous (numerical) legend", "'#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let type = $feature_types[feature]; if (type", "d3.select('#' + figure_id) .attr(\"width\", width + margin.left + margin.right) .attr(\"height\", height + margin.top", "=== \"categorical\") { color = d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\", d", "= \"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\"", "auto; pointer-events: none; background-color: white; padding: 5px; } .legend { background-color: white; position:", "class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id,", "i = 0; i <= 5; i += 1) { legend.append(\"text\") .attr(\"x\", 55)", "function with this selected option update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id': repr(chart_id),", "= dotColor return 
dotColor }) updateLegendCat(featureColors) // update the legend with the new", "lines // add the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height", "shape-rendering: crispEdges; } .grid path { stroke-width: 0; } .tooltip { position: absolute;", "<= 5; i += 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2", "= d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d => { let dotColor = color(d[feature]) return dotColor", ".attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight = 1; var linearGradient", "svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") )", "feature: </span> </label> <select id=$select_id> $options </select> </div> <div style=\"position: relative\"> <svg id=$figure_id", "#fff; opacity: 0.8; } .grid line { stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px;", "absolute; font-size: 12px; width: auto; height: auto; pointer-events: none; background-color: white; padding: 5px;", "+ legend_id).attr(\"y\", 0); // tooltip mouseover event handler var tipMouseover = d =>", "Add the legend container to the body container var legend = d3.select(\"#\" +", "features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in features ]) dict_data =", "the body container // it's invisible and its position/contents are defined during mouseover", "background-color: white; padding: 5px; } .legend { background-color: white; position: absolute; left: 650px;", ") .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x axis", "and Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear()", ".style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and label svg .append('g') 
.call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\",", "get the right orientation of gradient const legendScale = num => { var", "feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain", "width = 650 - margin.left - margin.right, height = 400 - margin.top -", "legend = d3.select(\"#\" + legend_id).html(\"\") var width = 30, height = 300; //", "A function that update the chart with the new color coding scheme function", "// X and Y scales and Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0,", "with the new color map } else { let feature_domain = $num_feature_ranges[feature] color", "(d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) { //", "((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event handler var", "$num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d => { let dotColor = color(d[feature])", "type = $feature_types[feature]; if (type === \"categorical\") { color = d3.scaleOrdinal(colors); let featureColors", "0]); // Add X-axis and label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\"", ".attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat);", "var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) )", "'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option", "legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var 
textHeight = 1; var", "</div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes", "import dedent from IPython.core.display import display, HTML from string import Template import numpy", "12px; width: auto; height: auto; pointer-events: none; background-color: white; padding: 5px; } .legend", "features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]),", "= d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip mouseover event handler var tipMouseover =", "dots = svg .selectAll() .data(data) .enter() .append(\"circle\") // Add the tooltip container to", "}; function updateLegendCat(featureColors) { // create the categorical legend var legend = d3.select(\"#\"", "body { font: 11px sans-serif; color: #2A3F5E } .chart { background-color: #E5ECF6; display:", "'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features)", ".domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width)", "\"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat]) ) .attr(\"r\", d", "= d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\", d => { let dotColor", "+ \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\", height +", "import display, HTML from string import Template import numpy as np # function", "feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent('''", "init_chart(data,features): chart_id = 
'mychart-' + str(uuid.uuid4()) feature_types = {} # map each feature", "Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label html += colorFeature + \": \"", "function updateLegendCat(featureColors) { // create the categorical legend var legend = d3.select(\"#\" +", "20*i ) .text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) } function updateLegendNum(domain)", "color(d[feature]) featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors) // update the legend with", ".text(xCat); // Add Y-axis and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\",", "= d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the legend container to the", "Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\")", "scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\",", ".attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat]) ) .attr(\"r\", d =>", "#fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid path { stroke-width: 0;", "dotColor return dotColor }) updateLegendCat(featureColors) // update the legend with the new color", "=> { // x and y numeric labels let html = xCat +", "svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the datapoints var dots", "+ margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\");", "legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let", "// add the Y gridlines svg.append(\"g\") 
.attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) //", "}) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover the option", "in x axis function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } // gridlines", "display: relative; } .axis path, .axis line { fill: none; stroke: #2A3F5E; shape-rendering:", "axis function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } // gridlines in y", "html += colorFeature + \": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10)", "num => { var scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) }", "margin-bottom: 20px; } .dot { stroke: #fff; opacity: 0.8; } .grid line {", "$legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id),", "var width = 30, height = 300; // add legend title legend.append(\"text\") .attr(\"x\",", "repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat),", "auto; height: auto; pointer-events: none; background-color: white; padding: 5px; } .legend { background-color:", "pointer-events: none; background-color: white; padding: 5px; } .legend { background-color: white; position: absolute;", "+ \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the Y gridlines svg.append(\"g\") .attr(\"class\",", "str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': repr(x_cat), 'y_cat': repr(y_cat), 'size_cat': repr(size_cat), 'options': repr(features_html_options)", "d3.select(this).property(\"value\") // run the updateChart function with this selected option update(selectedOption) }); }", "var svg = d3.select('#' + figure_id) .attr(\"width\", width + margin.left + margin.right) .attr(\"height\",", "background-color: white; 
position: absolute; left: 650px; top: 20px; width: auto; height: 500px; }", "+ 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and label svg .append('g') .call(d3.axisLeft(y));", ".range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height)", "if (type === \"categorical\") { color = d3.scaleOrdinal(colors); let featureColors = {} dots", "crispEdges; } .label { color: #2A3F5E; } .selection { margin-bottom: 20px; } .dot", "mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the legend", "= [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] =", "// ms .style(\"opacity\", 0); // don't care about position! }; var sizeScale =", "def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x", "in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\":", "container var legend = d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip mouseover event handler", "= 'mychart-' + str(uuid.uuid4()) feature_types = {} # map each feature to type", "d => y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature)", "dots .attr(\"fill\", d => { let dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain)", "inline-block; width: 240px; text-align: right\"> <span> Color by feature: </span> </label> <select id=$select_id>", "11px sans-serif; color: #2A3F5E } .chart { background-color: #E5ECF6; display: relative; } .axis", ".attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\", height + 35) .style(\"text-anchor\", 
\"end\") .text(xCat);", "return chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\"", "0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\",", "-30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x axis function function", "1 }) } function updateLegendNum(domain) { // create the continuous (numerical) legend var", "} .tooltip { position: absolute; font-size: 12px; width: auto; height: auto; pointer-events: none;", "{ // create the categorical legend var legend = d3.select(\"#\" + legend_id).html(\"\") //", "\"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) } function updateLegendNum(domain) { // create the", "Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"])", "\")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\", height + 35)", "svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\", height + 35) .style(\"text-anchor\", \"end\")", "= data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent('''", "featureColors = {} dots .attr(\"fill\", d => { let dotColor = color(d[feature]) featureColors[d[feature]]", "\"14px\") let i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i)", "that has been chosen var selectedOption = d3.select(this).property(\"value\") // run the updateChart function", "} }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)}))) return 
chart_id def", "auto; height: 500px; } </style> <script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } })", "\": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY", "= 0; i <= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\")", "f\"<option value ='{x}'>{x}</option>\" for x in features ]) dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat", "line { stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid path", "repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data),", "= svg .selectAll() .data(data) .enter() .append(\"circle\") // Add the tooltip container to the", ".attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight = 1; var linearGradient = legend.append(\"defs\")", ".attr(\"stop-color\", color(100-i)); // to get the right orientation of gradient const legendScale =", "width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0; i <= 5;", "let dotColor = color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\",", "updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover the option that", "d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the legend container to the body", "feature to type num_feature_ranges = {} for x in features: if data[x].dtype in", "\"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script", "gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines() 
.tickSize(-height) .tickFormat(\"\")", "width = 30, height = 300; // add legend title legend.append(\"text\") .attr(\"x\", 15)", "recover the option that has been chosen var selectedOption = d3.select(this).property(\"value\") // run", "i += 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30 + textHeight/2 + ((height-textHeight*6)/5)*i)", "object to the body of the page var svg = d3.select('#' + figure_id)", "stroke-width: 0; } .tooltip { position: absolute; font-size: 12px; width: auto; height: auto;", ") // Add the datapoints var dots = svg .selectAll() .data(data) .enter() .append(\"circle\")", "as np # function to initialize a scatter plot def init_chart(data,features): chart_id =", "of gradient const legendScale = num => { var scale = d3.scaleLinear() .domain([5,", "y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A", "style=\"display: inline-block; width: 240px; text-align: right\"> <span> Color by feature: </span> </label> <select", "== feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else:", "<div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script>", ".attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the", "option update(selectedOption) }); } }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'feature_types': repr(feature_types), 'num_feature_ranges': repr(num_feature_ranges)})))", "data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body", "let html = xCat + \": \" + Number((d[xCat]).toFixed(3)) 
+ \"<br>\" + yCat", "\")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\")", "legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature", "+ \"%\") .attr(\"stop-color\", color(100-i)); // to get the right orientation of gradient const", "margin.left - margin.right, height = 400 - margin.top - margin.bottom; // append the", "30 + textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip", "coding scheme function update(feature) { colorFeature = feature var colors = ['#636EFA', '#EF553B',", "url, we can eliminate this define here define($chart_id, ['d3'], function(d3) { return function", "define($chart_id, ['d3'], function(d3) { return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat,", "body of the page var svg = d3.select('#' + figure_id) .attr(\"width\", width +", "let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors) // update", "initialize a scatter plot def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4()) feature_types =", ".attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i <=", "x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat]) ) .attr(\"r\", d => sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover)", "// Add the datapoints var dots = svg .selectAll() .data(data) .enter() .append(\"circle\") //", ".attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature =>", "'#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52'] var color; let type = $feature_types[feature];", ".style(\"opacity\", 0); 
// don't care about position! }; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"])", "= init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in features ])", "require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure mychart via", "Add X-axis and label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + height", "container to the body container var legend = d3.select(\"#\" + legend_id).attr(\"y\", 0); //", "width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data, $x_cat, $y_cat, $size_cat,", "\"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\">", "<label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\"> <span> Color by feature: </span>", "stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; } .grid path { stroke-width:", ".9) }; function updateLegendCat(featureColors) { // create the categorical legend var legend =", "feature var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF',", ".selection { margin-bottom: 20px; } .dot { stroke: #fff; opacity: 0.8; } .grid", "event handler var tipMouseout = d => { tooltip.transition() .duration(0) // ms .style(\"opacity\",", "= {} dots .attr(\"fill\", d => { let dotColor = color(d[feature]) featureColors[d[feature]] =", ".attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\"); // X and", "{ \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display:", "// x and y numeric labels let html = xCat + 
\": \"", "d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add", ".data(data) .enter() .append(\"circle\") // Add the tooltip container to the body container //", "margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top +", "max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x]", "relative; } .axis path, .axis line { fill: none; stroke: #2A3F5E; shape-rendering: crispEdges;", "position/contents are defined during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0);", "svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\")", "var tipMouseover = d => { // x and y numeric labels let", "- 15) + \"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) { // create", "// to get the right orientation of gradient const legendScale = num =>", "} .selection { margin-bottom: 20px; } .dot { stroke: #fff; opacity: 0.8; }", "white; padding: 5px; } .legend { background-color: white; position: absolute; left: 650px; top:", "y numeric labels let html = xCat + \": \" + Number((d[xCat]).toFixed(3)) +", "height: 500px; } </style> <script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) //", "orientation of gradient const legendScale = num => { var scale = d3.scaleLinear()", "= d3.select(\"#\" + select_id).property(\"value\") var margin = {top: 40, right: 10, bottom: 50,", "240px; text-align: right\"> <span> Color by feature: </span> </label> <select id=$select_id> $options </select>", "if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name 
num_feature_ranges[x] =", "\" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label html += colorFeature +", ".attr(\"y\", height + 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and label svg", "return function (figure_id, legend_id, select_id, data, xCat, yCat, sizeCat, axes) { var initialFeature", "=> sizeScale(d[sizeCat[\"label\"]])) .on(\"mouseover\", tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A function that update the", ".attr(\"class\", \"label\") .attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\",", "{ return d3.axisBottom(x) .ticks(5) } // gridlines in y axis function function make_y_gridlines()", "position: absolute; font-size: 12px; width: auto; height: auto; pointer-events: none; background-color: white; padding:", "this define here define($chart_id, ['d3'], function(d3) { return function (figure_id, legend_id, select_id, data,", "updateLegendCat(featureColors) { // create the categorical legend var legend = d3.select(\"#\" + legend_id).html(\"\")", "gradient const legendScale = num => { var scale = d3.scaleLinear() .domain([5, 0])", ".label { color: #2A3F5E; } .selection { margin-bottom: 20px; } .dot { stroke:", "- margin.left - margin.right, height = 400 - margin.top - margin.bottom; // append", "+ yCat + \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label", "class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id, $data,", "color coding scheme function update(feature) { colorFeature = feature var colors = ['#636EFA',", "{ // recover the option that has been chosen var selectedOption = d3.select(this).property(\"value\")", "\"end\") .text(xCat); // Add Y-axis and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\")", ".append(\"linearGradient\") 
.attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i =", "4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i ) .text(feature) .style(\"font-size\",", "the right orientation of gradient const legendScale = num => { var scale", "d => { let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return dotColor })", "= $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d => { let dotColor =", "to the body container // it's invisible and its position/contents are defined during", "in features: if data[x].dtype in [\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1]", "+ textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout", "event handler var tipMouseover = d => { // x and y numeric", "= num => { var scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0)", "= d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and label svg .append('g') .attr(\"class\",", "function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } // gridlines in y axis", "x and y numeric labels let html = xCat + \": \" +", "var scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\", 20)", "HTML from string import Template import numpy as np # function to initialize", "height + \")\") .call(d3.axisBottom(x)) svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", width / 2) .attr(\"y\", height", "the body container var legend = d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip mouseover", "legend container to the body container var 
legend = d3.select(\"#\" + legend_id).attr(\"y\", 0);", "= 300; // add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\",", "update the chart with the new color coding scheme function update(feature) { colorFeature", "// run the updateChart function with this selected option update(selectedOption) }); } })", "the datapoints var dots = svg .selectAll() .data(data) .enter() .append(\"circle\") // Add the", "we configure mychart via url, we can eliminate this define here define($chart_id, ['d3'],", "data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div", "= legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let", "data, xCat, yCat, sizeCat, axes) { var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var", "feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name num_feature_ranges[x] = feature_domain else: feature_types[x] =", "add the X gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\")", ".attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 +", "import uuid from textwrap import dedent from IPython.core.display import display, HTML from string", "[min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\" else: feature_types[x] = data[x].dtype.name", "height + margin.top + margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\" +", "X-axis and label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + 
height +", "+ Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label html += colorFeature + \":", "+ \"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\", .9) }; function", "} d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover the option that has been", "\"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px;", ".attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\")", "} display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\"> <span>", "$x_cat, $y_cat, $size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' +", "} else { let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d", "the chart with the new color coding scheme function update(feature) { colorFeature =", "legend var legend = d3.select(\"#\" + legend_id).html(\"\") // clear current legend content legend.append(\"text\")", "run the updateChart function with this selected option update(selectedOption) }); } }) </script>", "0; i <= 5; i += 1) { legend.append(\"text\") .attr(\"x\", 55) .attr(\"y\", 30", "mychart via url, we can eliminate this define here define($chart_id, ['d3'], function(d3) {", "chart_id = 'mychart-' + str(uuid.uuid4()) feature_types = {} # map each feature to", "Add the tooltip container to the body container // it's invisible and its", "Add Y-axis and label svg .append('g') .call(d3.axisLeft(y)); svg.append(\"text\") .attr(\"class\", \"label\") .attr(\"x\", -(height -", "don't care about position! 
}; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\")", "} .grid line { stroke: #fff; stroke-opacity: 0.7; stroke-width: 2px; shape-rendering: crispEdges; }", "i += 1 }) } function updateLegendNum(domain) { // create the continuous (numerical)", "100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i)); // to", "#2A3F5E } .chart { background-color: #E5ECF6; display: relative; } .axis path, .axis line", "white; position: absolute; left: 650px; top: 20px; width: auto; height: 500px; } </style>", "width: auto; height: 500px; } </style> <script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', }", "scales and Axis var x = d3.scaleLinear() .domain(axes[\"x\"]) .range([0, width]); var y =", "\"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x axis function function make_x_gridlines() {", ".domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d => y(d[yCat])", "the tooltip container to the body container // it's invisible and its position/contents", ".attr(\"fill\", d => { let dotColor = color(d[feature]) featureColors[d[feature]] = dotColor return dotColor", ".attr(\"font-size\", \"14px\") var textHeight = 1; var linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\")", "height: auto; pointer-events: none; background-color: white; padding: 5px; } .legend { background-color: white;", "} legend.append(\"rect\") .attr(\"x\", 20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for", "path { stroke-width: 0; } .tooltip { position: absolute; font-size: 12px; width: auto;", "sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\",", 
"tipMouseout) update(initialFeature) // A function that update the chart with the new color", "\": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color feature label html += colorFeature", "} }) // If we configure mychart via url, we can eliminate this", "{ var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin = {top: 40, right:", "{ tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); // don't care about position! };", "body container var legend = d3.select(\"#\" + legend_id).attr(\"y\", 0); // tooltip mouseover event", "} } d3.select(\"#\" + select_id).on(\"change\", function(d) { // recover the option that has", "initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin = {top: 40, right: 10, bottom:", "400 - margin.top - margin.bottom; // append the svg object to the body", "i + \"%\") .attr(\"stop-color\", color(100-i)); // to get the right orientation of gradient", ".call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") ) // Add the datapoints var dots = svg .selectAll()", "+= 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i)); // to get the", "\"%\") .attr(\"stop-color\", color(100-i)); // to get the right orientation of gradient const legendScale", "textHeight/2 + ((height-textHeight*6)/5)*i) .text(legendScale(i)) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\"); } } // tooltip mouseout event", "update the legend with the new color map } else { let feature_domain", "new color map } else { let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain)", "</div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div>", "{ let feature_domain = $num_feature_ranges[feature] color = d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", d 
=> {", "= { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\"", "left: 50}, width = 650 - margin.left - margin.right, height = 400 -", "<div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block; width: 240px; text-align: right\"> <span> Color by", ".style(\"top\", (d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\", .9) }; function updateLegendCat(featureColors) {", "// append the svg object to the body of the page var svg", "+ \": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\",", "feature_domain else: feature_types[x] = \"categorical\" display(HTML('<script src=\"/static/components/requirejs/require.js\"></script>')) display(HTML(Template(dedent(''' <style> body { font: 11px", "color(100-i)); // to get the right orientation of gradient const legendScale = num", "label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(d3.axisBottom(x))", "20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40) .attr(\"y\", 30 + 20*i )", "= d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat]) ) .attr(\"cy\", d", "<div class=\"legend\"><svg id=$legend_id height=500 width=400></svg></div> </div> <script> require([$chart_id], function(mychart) { mychart($figure_id, $legend_id, $select_id,", "display, HTML from string import Template import numpy as np # function to", "scatter plot def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4()) feature_types = {} #", "margin = {top: 40, right: 10, bottom: 50, left: 50}, width = 650", "bottom: 50, left: 50}, width = 650 - margin.left - margin.right, height =", "mouseover event handler var tipMouseover = d => { // x and y", "{ color = 
d3.scaleOrdinal(colors); let featureColors = {} dots .attr(\"fill\", d => {", "with the new color coding scheme function update(feature) { colorFeature = feature var", "numeric labels let html = xCat + \": \" + Number((d[xCat]).toFixed(3)) + \"<br>\"", "fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label { color: #2A3F5E; } .selection", "[\"int64\", \"float64\"]: feature_domain = [min(data[x].dropna()), max(data[x].dropna())] if feature_domain[1] == feature_domain[0]: feature_types[x] = \"categorical\"", "width / 2) .attr(\"y\", height + 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis", "2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in x", "\"url(#linear-gradient)\"); for (let i = 0; i <= 5; i += 1) {", "{ var scale = d3.scaleLinear() .domain([5, 0]) .range(domain) return Number((scale(num))).toFixed(0) } legend.append(\"rect\") .attr(\"x\",", "i <= 100; i += 5) linearGradient.append(\"stop\") .attr(\"offset\", i + \"%\") .attr(\"stop-color\", color(100-i));", "tipMouseover) .on(\"mouseout\", tipMouseout) update(initialFeature) // A function that update the chart with the", "Add the datapoints var dots = svg .selectAll() .data(data) .enter() .append(\"circle\") // Add", "$size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id':", "(numerical) legend var legend = d3.select(\"#\" + legend_id).html(\"\") var width = 30, height", "label html += colorFeature + \": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX +", "\"label\") .attr(\"x\", -(height - 15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\")", ".text(colorFeature) .attr(\"font-size\", \"14px\") let i = 0 Object.keys(featureColors).forEach(feature => { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30", ".append(\"circle\") // Add the tooltip 
container to the body container // it's invisible", "and label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" + height + \")\")", "// Add X-axis and label svg .append('g') .attr(\"class\", \"x axis\") .attr(\"transform\", \"translate(0,\" +", ".domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and label svg .append('g') .attr(\"class\", \"x axis\")", "dict_data = data.replace(np.nan, \"N/A\").to_dict(\"records\") size_cat = { \"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] }", "\" + Number((d[xCat]).toFixed(3)) + \"<br>\" + yCat + \": \" + Number((d[yCat]).toFixed(3)) +", "\"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add", ") }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())), 'legend_id': repr('leg-' +", "sizeCat, axes) { var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin = {top:", "feature label html += colorFeature + \": \" + d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX", "position! 
}; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d =>", "gridlines in x axis function function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } //", "margin.bottom) .append(\"g\") .attr(\"transform\", \"translate(\" + margin.left + \",\" + margin.top + \")\"); //", "50, left: 50}, width = 650 - margin.left - margin.right, height = 400", ".text(feature) .style(\"font-size\", \"14px\") .attr(\"alignment-baseline\",\"middle\") i += 1 }) } function updateLegendNum(domain) { //", "// create the continuous (numerical) legend var legend = d3.select(\"#\" + legend_id).html(\"\") var", "= d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i <= 100; i += 5)", "{ position: absolute; font-size: 12px; width: auto; height: auto; pointer-events: none; background-color: white;", "// tooltip mouseout event handler var tipMouseout = d => { tooltip.transition() .duration(0)", "color(d[feature]) return dotColor }) updateLegendNum(feature_domain) } } d3.select(\"#\" + select_id).on(\"change\", function(d) { //", "\"label\": \"n_reads\", \"range\": [min(data[\"n_reads\"]), max(data[\"n_reads\"])] } display(HTML(Template(dedent(''' <div class=\"selection\"> <label for=\"colorFeature\" style=\"display: inline-block;", "color: #2A3F5E } .chart { background-color: #E5ECF6; display: relative; } .axis path, .axis", ".range([0, width]); var y = d3.scaleLinear() .domain(axes[\"y\"]) .range([height, 0]); // Add X-axis and", "add legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight", ".axis path, .axis line { fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label", "$select_id, $data, $x_cat, $y_cat, $size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id':", "'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes': repr(axes), 'x_cat': 
repr(x_cat), 'y_cat': repr(y_cat), 'size_cat':", ") // add the Y gridlines svg.append(\"g\") .attr(\"class\", \"grid\") .call(make_y_gridlines() .tickSize(-width) .tickFormat(\"\") )", "{ // create the continuous (numerical) legend var legend = d3.select(\"#\" + legend_id).html(\"\")", ".attr(\"class\", \"grid\") .attr(\"transform\", \"translate(0,\" + height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) //", "colorFeature = feature var colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692',", "+ str(uuid.uuid4())), 'legend_id': repr('leg-' + str(uuid.uuid4())), 'select_id': repr('sel-' + str(uuid.uuid4())), 'data': repr(dict_data), 'axes':", "color feature label html += colorFeature + \": \" + d[colorFeature] tooltip.html(html) .style(\"left\",", "'mychart-' + str(uuid.uuid4()) feature_types = {} # map each feature to type num_feature_ranges", ".selectAll() .data(data) .enter() .append(\"circle\") // Add the tooltip container to the body container", "care about position! 
}; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\",", "{ background-color: white; position: absolute; left: 650px; top: 20px; width: auto; height: 500px;", "\"tooltip\") .style(\"opacity\", 0); // Add the legend container to the body container var", "chart_id def scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for", "here define($chart_id, ['d3'], function(d3) { return function (figure_id, legend_id, select_id, data, xCat, yCat,", ".tickSize(-width) .tickFormat(\"\") ) // Add the datapoints var dots = svg .selectAll() .data(data)", "'#FF97FF', '#FECB52'] var color; let type = $feature_types[feature]; if (type === \"categorical\") {", "// color feature label html += colorFeature + \": \" + d[colorFeature] tooltip.html(html)", "= {top: 40, right: 10, bottom: 50, left: 50}, width = 650 -", "+ select_id).property(\"value\") var margin = {top: 40, right: 10, bottom: 50, left: 50},", "<select id=$select_id> $options </select> </div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg", "= d => { tooltip.transition() .duration(0) // ms .style(\"opacity\", 0); // don't care", ".attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add the legend container to the body container", "10) + \"px\") .style(\"top\", (d3.event.pageY - 15) + \"px\") .transition() .style(\"opacity\", .9) };", "a scatter plot def init_chart(data,features): chart_id = 'mychart-' + str(uuid.uuid4()) feature_types = {}", "$options </select> </div> <div style=\"position: relative\"> <svg id=$figure_id class='chart'></svg> <div class=\"legend\"><svg id=$legend_id height=500", "str(uuid.uuid4()) feature_types = {} # map each feature to type num_feature_ranges = {}", "// clear current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) 
.attr(\"font-size\", \"14px\")", "{ stroke-width: 0; } .tooltip { position: absolute; font-size: 12px; width: auto; height:", "IPython.core.display import display, HTML from string import Template import numpy as np #", "{ legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\", 40)", "\"translate(0,\" + height + \")\") .call(make_x_gridlines() .tickSize(-height) .tickFormat(\"\") ) // add the Y", "$y_cat, $size_cat, $axes ) }) </script> ''')).substitute({ 'chart_id': repr(chart_id), 'figure_id': repr('fig-' + str(uuid.uuid4())),", "+ \"<br>\" + yCat + \": \" + Number((d[yCat]).toFixed(3)) + \"<br><br>\" // color", "d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for (let i = 0; i <= 100; i += 5) linearGradient.append(\"stop\")", "= color(d[feature]) featureColors[d[feature]] = dotColor return dotColor }) updateLegendCat(featureColors) // update the legend", "- margin.bottom; // append the svg object to the body of the page", "map each feature to type num_feature_ranges = {} for x in features: if", "15)/ 2 ) .attr(\"y\", -30) .attr(\"transform\", \"rotate(-90)\") .style(\"text-anchor\", \"end\") .text(yCat); // gridlines in", "svg .selectAll() .data(data) .enter() .append(\"circle\") // Add the tooltip container to the body", "+ legend_id).html(\"\") var width = 30, height = 300; // add legend title", "and y numeric labels let html = xCat + \": \" + Number((d[xCat]).toFixed(3))", "axes) { var initialFeature = d3.select(\"#\" + select_id).property(\"value\") var margin = {top: 40,", "scatter_plot(data,x_cat,y_cat,axes,features): chart_id = init_chart(data,features) features_html_options = \"\".join([ f\"<option value ='{x}'>{x}</option>\" for x in", "linearGradient = legend.append(\"defs\") .append(\"linearGradient\") .attr(\"id\", \"linear-gradient\") .attr(\"gradientTransform\", \"rotate(90)\"); var color = 
d3.scaleSequential(d3.interpolatePlasma).domain([0,100]) for", "current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") let i", ".ticks(5) } // gridlines in y axis function function make_y_gridlines() { return d3.axisLeft(y)", "legend title legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10) .text(colorFeature) .attr(\"font-size\", \"14px\") var textHeight =", "}) // If we configure mychart via url, we can eliminate this define", "type num_feature_ranges = {} for x in features: if data[x].dtype in [\"int64\", \"float64\"]:", "'d3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min', } }) // If we configure mychart via url, we can", "function make_y_gridlines() { return d3.axisLeft(y) .ticks(5) } // add grid lines // add", "} function updateLegendNum(domain) { // create the continuous (numerical) legend var legend =", "function make_x_gridlines() { return d3.axisBottom(x) .ticks(5) } // gridlines in y axis function", "d3.select(\"#\" + legend_id).html(\"\") // clear current legend content legend.append(\"text\") .attr(\"x\", 15) .attr(\"y\", 10)", "20px; width: auto; height: 500px; } </style> <script> require.config({ paths: { 'd3': 'https://cdnjs.cloudflare.com/ajax/libs/d3/5.16.0/d3.min',", ".attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i = 0; i <=", "<span> Color by feature: </span> </label> <select id=$select_id> $options </select> </div> <div style=\"position:", "margin.top - margin.bottom; // append the svg object to the body of the", "can eliminate this define here define($chart_id, ['d3'], function(d3) { return function (figure_id, legend_id,", "/ 2) .attr(\"y\", height + 35) .style(\"text-anchor\", \"end\") .text(xCat); // Add Y-axis and", "var color; let type = $feature_types[feature]; if (type === \"categorical\") { color =", "d3.scaleSequential(d3.interpolatePlasma).domain(feature_domain) dots .attr(\"fill\", 
d => { let dotColor = color(d[feature]) return dotColor })", "of the page var svg = d3.select('#' + figure_id) .attr(\"width\", width + margin.left", "+ d[colorFeature] tooltip.html(html) .style(\"left\", (d3.event.pageX + 10) + \"px\") .style(\"top\", (d3.event.pageY - 15)", "=> { legend.append(\"circle\") .attr(\"cx\",20) .attr(\"cy\",30 + 20*i) .attr(\"r\", 4) .style(\"fill\", featureColors[feature]) legend.append(\"text\") .attr(\"x\",", "20) .attr(\"y\", 30) .attr(\"width\", width) .attr(\"height\", height) .style(\"fill\", \"url(#linear-gradient)\"); for (let i =", "defined during mouseover var tooltip = d3.select(\"body\").append(\"div\") .attr(\"class\", \"tooltip\") .style(\"opacity\", 0); // Add", "}; var sizeScale = d3.scaleLinear() .domain(sizeCat[\"range\"]) .range([3,7]) dots.attr(\"class\", \"dot\") .attr(\"cx\", d => x(d[xCat])", ".axis line { fill: none; stroke: #2A3F5E; shape-rendering: crispEdges; } .label { color:", "'#B6E880', '#FF97FF', '#FECB52'] var color; let type = $feature_types[feature]; if (type === \"categorical\")" ]
[ ":return: Dataframe ''' length = len(df) if num < 1: num = int(length", "get_subdirs * 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample", "sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试", "plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data: Series数据 :param subplot: 绘制原始的,log 和", "- distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间 -", "pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000 chunks = [] while loop: try:", "degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None ''' if data_norm: data_normed", "和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return:", "isinstance(data, pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count / data_count.sum()", "ylabel = 'Prob' if cmp: data = distribution_cdf(data) ylabel = 'CCDF' else: data", "in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num):", "data: list 或者 pandas.Series. 
:return: pandas.Series ''' if data is None: return None", "bins) cdf = [] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index)", "axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot):", "ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max()", "数据归一化到某个区间 - normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah!", "style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8)", "pandas as pd import numpy as np import matplotlib.pyplot as plt def read_csv(readpath,**kwargs):", "* 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 - distribution_pdf *", "/ data_count.sum() return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return:", "None if bins is None: bins = 200 if isinstance(data,pd.Series): data = data.values", "返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return:", "data_count = data.value_counts().sort_index() data_p = data_count / data_count.sum() return data_p def distribution_pdf(data, bins=None):", "np.histogram(data, bins=bins, density=True) xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf =", "logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i", "计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize 备注: *", "str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return: list ''' files", "for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf = cdf 
/", "in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max() return", "= cdf / cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): '''", "subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True)", "cmp: data = distribution_cdf(data) ylabel = 'CCDF' else: data = distribution_pdf(data) fg =", "None if not isinstance(data, pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index() data_p =", "return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式 :return:", "pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param", "cmp=False, grid=True): ''' :param data: Series数据 :param subplot: 绘制原始的,log 和 log-log :param data_norm:", "xmax = np.max(data) xmin = np.min(data) data_new = (upper - lower) * (data", "normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp:", "data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 ''' xmax", "normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界", "bins = 200 if isinstance(data,pd.Series): data = data.values density, xdata = np.histogram(data, bins=bins,", "data.values density, xdata = np.histogram(data, bins=bins, density=True) xdata = (xdata + np.roll(xdata, -1))[:-1]", "数据处理的方法 方法: # 功能 # - # 方法 # * 读取文件较大的csv - read_csv", "== filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files elif return_type == 'abspath':", "pandas.Series ''' if data is None: return None if not isinstance(data, pd.Series): data", 
"distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series ''' if data is", "start to read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize", "return_type == 'name': return files elif return_type == 'abspath': files = [os.path.join(filedir, each", "logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in", "None: bins = 200 if isinstance(data,pd.Series): data = data.values density, xdata = np.histogram(data,", "= (xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf", "num) df_sample = df.loc[ind_sample, :] else: df_sample = df return df_sample def distribution_fre(data):", "* 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import random import pandas", "(upper - lower) * (data - xmin) / (xmax - xmin) + lower", "as np import matplotlib.pyplot as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath:", "* 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all *", "2018.4.12 - 修改完善,oh yeah! * 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os", ">= 1: for file_i in each[-1]: if(filetype is None or os.path.splitext(file_i)[1] == filetype)", "'name': return files elif return_type == 'abspath': files = [os.path.join(filedir, each + filetype)", "'abspath': files = [os.path.join(filedir, each + filetype) for each in files] return files", "num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! 
:param df: Dataframe,数据 :param", "or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files elif return_type", "data = pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count / data_count.sum() return data_p", "规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data) xmin = np.min(data) data_new = (upper", "return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名 或", "distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize 备注: * 2017.10.16 -", "in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log')", "hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import random import pandas as pd import", "功能 # - # 方法 # * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 -", "if (filetype is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name':", "np import matplotlib.pyplot as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath", "data_count / data_count.sum() return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据", "bins=bins, density=True) xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density,", "目的:提供一些文件操作 和 数据处理的方法 方法: # 功能 # - # 方法 # * 读取文件较大的csv", "True chunkSize = 100000 chunks = [] while loop: try: chunk = reader.get_chunk(chunkSize)", "data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series ''' if", "= (upper - lower) * (data - xmin) / (xmax - xmin) +", "is None: return None if not isinstance(data, pd.Series): data = pd.Series(data) data_count =", "= pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max() return cdf def 
plot_distribution(data, subplot=2,", "list ''' files = [] for filename in os.listdir(filedir): if (filetype is None", "分块读取大文件的 csv :param readpath: filepath :return: pd.DataFrame ''' print(' - - start to", "if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成", "和 数据处理的方法 方法: # 功能 # - # 方法 # * 读取文件较大的csv -", "list 或者 pandas.Series. :return: pandas.Series ''' if data is None: return None if", "num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length = len(df) if num < 1:", "= np.histogram(data, bins=bins, density=True) xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf", "is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs", "= pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录", "for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def", "for each in os.walk(filedir): if len(each[-1]) >= 1: for file_i in each[-1]: if(filetype", "pdf = distribution_pdf(data, bins) cdf = [] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf", "data: Series数据 :param subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp:", "files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs = [] for subdir in os.listdir(sup_dir):", "cdf = cdf / cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True):", "= 'Prob' if cmp: data = distribution_cdf(data) ylabel = 'CCDF' else: data =", "= df.loc[ind_sample, :] else: df_sample = df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1", ":return: list ''' files = [] for each in os.walk(filedir): if len(each[-1]) >=", 
"files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files elif return_type == 'abspath': files =", "''' if data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name)", "read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all", "dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! * 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 '''", "i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True)", "''' :param data: Series数据 :param subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree", "to read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize =", "def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath :return: pd.DataFrame ''' print(' -", "[] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf = cdf", "# - # 方法 # * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs", "data_pdf,pandas.Series ''' if data is None: return None if bins is None: bins", "loop = False data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'): '''", ":param data: list 或者 pandas.Series. 
:return: pandas.Series ''' if data is None: return", ":param data: 数据 :return: data_pdf,pandas.Series ''' if data is None: return None if", "- hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import random import pandas as pd", "style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for", "* 计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改", "''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! :param df: Dataframe,数据 :param num:", "np.min(data) data_new = (upper - lower) * (data - xmin) / (xmax -", "== 'name': return files elif return_type == 'abspath': files = [os.path.join(filedir, each +", "loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop = False data =", "''' if data is None: return None if bins is None: bins =", "< 1: num = int(length * num) inds = list(df.index) if num <=", "data_p = data_count / data_count.sum() return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param", "import random import pandas as pd import numpy as np import matplotlib.pyplot as", ":return: list ''' files = [] for filename in os.listdir(filedir): if (filetype is", "axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes", "Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length = len(df) if num", "axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array,", "read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000", "import pandas as pd import numpy as np import matplotlib.pyplot as plt def", "reader = 
pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000 chunks = [] while", "1: num = int(length * num) inds = list(df.index) if num <= length:", "str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return: list ''' files = [] for", "cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max() return cdf def", "csv :param readpath: filepath :return: pd.DataFrame ''' print(' - - start to read", "df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. :return: pandas.Series '''", ":param data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 '''", ":param return_type: 只是文件名 或 绝对地址 :return: list ''' files = [] for filename", "bins is None: bins = 200 if isinstance(data,pd.Series): data = data.values density, xdata", "= distribution_pdf(data, bins) cdf = [] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf =", "as pd import numpy as np import matplotlib.pyplot as plt def read_csv(readpath,**kwargs): '''", "each in files] return files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir:", "1: for file_i in each[-1]: if(filetype is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0],", "(xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf def", "xmin = np.min(data) data_new = (upper - lower) * (data - xmin) /", "def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式 :param", ":return: pd.DataFrame ''' print(' - - start to read - - %s'%readpath) reader", "''' if data is None: return None if not isinstance(data, pd.Series): data =", "data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf = [] for ind", ":param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None '''", "df: 
Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length = len(df) if", "distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. :return: pandas.Series ''' if data", "inds = list(df.index) if num <= length: ind_sample = random.sample(inds, num) df_sample =", "df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. :return:", "files def get_subdir(sup_dir): sub_dirs = [] for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir)", "= data_count / data_count.sum() return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data:", "in os.walk(filedir): if len(each[-1]) >= 1: for file_i in each[-1]: if(filetype is None", "files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式", "* 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! * 2018.11.27 - hex2rgb", "except StopIteration: loop = False data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None,", "xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata) return", "plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath :return: pd.DataFrame ''' print('", "return data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf = [] for", "2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import random import pandas as", "None: return None if not isinstance(data, pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index()", "- normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! 
*", "ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data)", "rgb2hex,转到colorfly去了 ''' import os import random import pandas as pd import numpy as", "= [] for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return", "data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if", "subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1])", "for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def", "cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data: Series数据 :param subplot:", "data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None ''' if", "if return_type == 'name': return files elif return_type == 'abspath': files = [os.path.join(filedir,", "如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! :param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2", "in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1): '''", "str,目录 :param filetype: str,文件格式 :return: list ''' files = [] for each in", "* 计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize 备注:", "修改完善,oh yeah! 
* 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import random", "get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf", "def get_subdir(sup_dir): sub_dirs = [] for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if", "coding:utf-8 -*- ''' 目的:提供一些文件操作 和 数据处理的方法 方法: # 功能 # - # 方法", "if not isinstance(data, pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count", "return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param lower: 规范化的下界", "filetype) for each in files] return files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表", "# 方法 # * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名", "grid: 网格线是否显示 :return: None ''' if data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+", "'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data = distribution_cdf(data)", "获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布", "bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series ''' if data is None:", "str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data = distribution_cdf(data) ylabel", ":param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length = len(df)", "False data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param", "random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre", "matplotlib.pyplot as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath :return: pd.DataFrame", "pd import numpy as np import matplotlib.pyplot as plt def 
read_csv(readpath,**kwargs): ''' 分块读取大文件的", "xdata = np.histogram(data, bins=bins, density=True) xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0", "if cmp: data = distribution_cdf(data) ylabel = 'CCDF' else: data = distribution_pdf(data) fg", "return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series '''", "return files elif return_type == 'abspath': files = [os.path.join(filedir, each + filetype) for", "网格线是否显示 :return: None ''' if data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name)", "file_i)) return files def get_subdir(sup_dir): sub_dirs = [] for subdir in os.listdir(sup_dir): abs_path", "str,文件格式 :return: list ''' files = [] for each in os.walk(filedir): if len(each[-1])", "density=True) xdata = (xdata + np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata)", "= list(df.index) if num <= length: ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample,", "= os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的", "pandas.Series. 
:return: pandas.Series ''' if data is None: return None if not isinstance(data,", "if data is None: return None if bins is None: bins = 200", "cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data: Series数据", "data is None: return None if bins is None: bins = 200 if", "df_sample = df.loc[ind_sample, :] else: df_sample = df return df_sample def distribution_fre(data): '''", "i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1):", "pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data = distribution_cdf(data) ylabel = 'CCDF' else:", "- distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize 备注: * 2017.10.16", "None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files elif", "cdf = pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max() return cdf def plot_distribution(data,", "axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data:", "get_subdir(sup_dir): sub_dirs = [] for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path):", "in os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type", "readpath: filepath :return: pd.DataFrame ''' print(' - - start to read - -", "chunkSize = 100000 chunks = [] while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk)", "def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf = [] for ind in", "= data.value_counts().sort_index() data_p = data_count / data_count.sum() return data_p def distribution_pdf(data, bins=None): '''", "= 
pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000 chunks = [] while loop:", "cdf = [] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf", "os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix", "len(each[-1]) >= 1: for file_i in each[-1]: if(filetype is None or os.path.splitext(file_i)[1] ==", "data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob'", "/ cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data:", "[] for each in os.walk(filedir): if len(each[-1]) >= 1: for file_i in each[-1]:", "distribution_fre * 数据归一化到某个区间 - normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 -", "= random.sample(inds, num) df_sample = df.loc[ind_sample, :] else: df_sample = df return df_sample", "data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel =", "plt.figure() axes = [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if", "方法: # 功能 # - # 方法 # * 读取文件较大的csv - read_csv *", "in files] return files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录", "= [] for each in os.walk(filedir): if len(each[-1]) >= 1: for file_i in", ":param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length = len(df) if num <", "if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-',", "return_type: 只是文件名 或 绝对地址 :return: list ''' files = [] for filename in", "Dataframe ''' length = len(df) if num < 1: num = int(length *", 
"list ''' files = [] for each in os.walk(filedir): if len(each[-1]) >= 1:", "file_i in each[-1]: if(filetype is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return", "chunks.append(chunk) except StopIteration: loop = False data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir,", "将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据", "for filename in os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0])", "files = [os.path.join(filedir, each + filetype) for each in files] return files return", "random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! :param df: Dataframe,数据", "+ np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data,", "None: return None if bins is None: bins = 200 if isinstance(data,pd.Series): data", "'CCDF' else: data = distribution_pdf(data) fg = plt.figure() axes = [] for i", "fg = plt.figure() axes = [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-')", "数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布", "elif return_type == 'abspath': files = [os.path.join(filedir, each + filetype) for each in", "data = distribution_pdf(data) fg = plt.figure() axes = [] for i in range(subplot):", "(filetype is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return", ":param lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data) xmin", ":param filetype: str,文件格式 :return: list ''' files = [] for each in os.walk(filedir):", "[] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') 
axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*',", "return_type == 'abspath': files = [os.path.join(filedir, each + filetype) for each in files]", "= plt.figure() axes = [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution')", "[os.path.join(filedir, each + filetype) for each in files] return files return files def", "files = [] for filename in os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1]", "else: data = distribution_pdf(data) fg = plt.figure() axes = [] for i in", "def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式 :return: list '''", "改成 df.loc 试试 ! :param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe", "0.2 :return: Dataframe ''' length = len(df) if num < 1: num =", "2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! * 2018.11.27 - hex2rgb 与", "yeah! * 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import random import", "index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf = []", "= data.values density, xdata = np.histogram(data, bins=bins, density=True) xdata = (xdata + np.roll(xdata,", "''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式 :return: list ''' files =", "num <= length: ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample, :] else: df_sample", "'Prob' if cmp: data = distribution_cdf(data) ylabel = 'CCDF' else: data = distribution_pdf(data)", "* 2018.4.12 - 修改完善,oh yeah! 
* 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import", "pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False,", "- # 方法 # * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs *", "os import random import pandas as pd import numpy as np import matplotlib.pyplot", "[] while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop = False", "axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2],", "''' 分块读取大文件的 csv :param readpath: filepath :return: pd.DataFrame ''' print(' - - start", "* num) inds = list(df.index) if num <= length: ind_sample = random.sample(inds, num)", "is None: bins = 200 if isinstance(data,pd.Series): data = data.values density, xdata =", "= 100000 chunks = [] while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except", "reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop = False data = pd.concat(chunks,ignore_index=True) return data def", "''' print(' - - start to read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs)", "= [] while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop =", "获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布", "''' import os import random import pandas as pd import numpy as np", "return files def get_subdir(sup_dir): sub_dirs = [] for subdir in os.listdir(sup_dir): abs_path =", "df.ix 改成 df.loc 试试 ! 
:param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return:", "= pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf", "filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return: list ''' files = []", "np.max(data) xmin = np.min(data) data_new = (upper - lower) * (data - xmin)", "读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名", ":param filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return: list ''' files =", "if num <= length: ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample, :] else:", "axes = [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2:", "返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式 :return: list ''' files = []", "length = len(df) if num < 1: num = int(length * num) inds", "distribution_pdf(data, bins) cdf = [] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf,", "logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return", "if(filetype is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir):", "def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! :param df:", "def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper:", "''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. 
:return: pandas.Series ''' if data is", "range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间", "data.value_counts().sort_index() data_p = data_count / data_count.sum() return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布", "pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count / data_count.sum() return", ":param filedir: str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return: list", "filename in os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if", "Series数据 :param subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率", "pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count / data_count.sum() return data_p def distribution_pdf(data,", "方法 # * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 -", "数据 :return: data_pdf,pandas.Series ''' if data is None: return None if bins is", "data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid,", "或者 pandas.Series. :return: pandas.Series ''' if data is None: return None if not", "# * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 - get_files", "data: 数据 :return: data_pdf,pandas.Series ''' if data is None: return None if bins", ":param readpath: filepath :return: pd.DataFrame ''' print(' - - start to read -", "sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! 
:param", "get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式 :param return_type:", "2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data,", "可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 ''' xmax =", ":param upper: 规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data) xmin = np.min(data) data_new", "# -*- coding:utf-8 -*- ''' 目的:提供一些文件操作 和 数据处理的方法 方法: # 功能 # -", "或 绝对地址 :return: list ''' files = [] for filename in os.listdir(filedir): if", "for file_i in each[-1]: if(filetype is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i))", "= False data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址", "= [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1],", "- - start to read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop =", "= [] for ind in pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf =", "num < 1: num = int(length * num) inds = list(df.index) if num", "random import pandas as pd import numpy as np import matplotlib.pyplot as plt", "get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式 :return: list ''' files", "files elif return_type == 'abspath': files = [os.path.join(filedir, each + filetype) for each", "subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid:", "files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype: str,文件格式 :return: list", "axes[i].grid(grid, alpha=0.8) return axes def 
normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param", "os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): '''", "index=pdf.index) cdf = cdf / cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False,", "= 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data =", "cdf / cdf.max() return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param", "+ filetype) for each in files] return files return files def get_files_all(filedir,filetype=None): '''", "not isinstance(data, pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count /", "= pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data = distribution_cdf(data) ylabel = 'CCDF'", "备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! 
* 2018.11.27 -", "each + filetype) for each in files] return files return files def get_files_all(filedir,filetype=None):", "- read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 -", "data = pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data = distribution_cdf(data) ylabel =", "def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series ''' if data", "return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 !", "= True chunkSize = 100000 chunks = [] while loop: try: chunk =", ":param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None ''' if data_norm: data_normed =", "for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True,", ":param grid: 网格线是否显示 :return: None ''' if data_norm: data_normed = normlize(data.values,0,1) name =", "! 
:param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length =", "= len(df) if num < 1: num = int(length * num) inds =", "as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath :return: pd.DataFrame '''", "= pd.Series(data) data_count = data.value_counts().sort_index() data_p = data_count / data_count.sum() return data_p def", "filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files elif return_type == 'abspath': files", "else: df_sample = df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list", "bins=None): pdf = distribution_pdf(data, bins) cdf = [] for ind in pdf.index: cdf.append(pdf[:ind].sum())", ":param filedir: str,目录 :param filetype: str,文件格式 :return: list ''' files = [] for", "#axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel)", "200 if isinstance(data,pd.Series): data = data.values density, xdata = np.histogram(data, bins=bins, density=True) xdata", "-*- coding:utf-8 -*- ''' 目的:提供一些文件操作 和 数据处理的方法 方法: # 功能 # - #", "name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob' if cmp: data", "- dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! 
* 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了", "for each in files] return files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param", ":return: 规范化的数据 ''' xmax = np.max(data) xmin = np.min(data) data_new = (upper -", "chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop = False data = pd.concat(chunks,ignore_index=True) return", "length: ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample, :] else: df_sample = df", "filepath :return: pd.DataFrame ''' print(' - - start to read - - %s'%readpath)", "or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs = []", "os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type ==", "只是文件名 或 绝对地址 :return: list ''' files = [] for filename in os.listdir(filedir):", "upper: 规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data) xmin = np.min(data) data_new =", "= distribution_cdf(data) ylabel = 'CCDF' else: data = distribution_pdf(data) fg = plt.figure() axes", "= reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop = False data = pd.concat(chunks,ignore_index=True) return data", "''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param upper: 规范化的上界 :return:", "each[-1]: if(filetype is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files def", "import os import random import pandas as pd import numpy as np import", "if data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel", "filedir: str,目录 :param filetype: str,文件格式 :return: list ''' files = [] for each", "= distribution_pdf(data) fg = plt.figure() axes = [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1))", "* 数据表随机长度的抽样 - 
random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf *", "if len(each[-1]) >= 1: for file_i in each[-1]: if(filetype is None or os.path.splitext(file_i)[1]", "= df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series.", "print(' - - start to read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop", "os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc", ":return: None ''' if data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data", "试试 ! :param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length", "- %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000 chunks =", "df.loc[ind_sample, :] else: df_sample = df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param", "os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files elif return_type ==", "return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. 
:return: pandas.Series", "int(length * num) inds = list(df.index) if num <= length: ind_sample = random.sample(inds,", "if isinstance(data,pd.Series): data = data.values density, xdata = np.histogram(data, bins=bins, density=True) xdata =", "style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3:", "return None if bins is None: bins = 200 if isinstance(data,pd.Series): data =", "ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample, :] else: df_sample = df return", "%s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000 chunks = []", "def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. :return: pandas.Series ''' if", "while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop = False data", "绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示", "files = [] for each in os.walk(filedir): if len(each[-1]) >= 1: for file_i", "= int(length * num) inds = list(df.index) if num <= length: ind_sample =", "get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 - distribution_pdf", "data_pdf = pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins)", "- 修改完善,oh yeah! 
* 2018.11.27 - hex2rgb 与 rgb2hex,转到colorfly去了 ''' import os import", "if bins is None: bins = 200 if isinstance(data,pd.Series): data = data.values density,", "filedir: str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址 :return: list '''", "是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None ''' if data_norm: data_normed = normlize(data.values,0,1) name", "ylabel = 'CCDF' else: data = distribution_pdf(data) fg = plt.figure() axes = []", "subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data: Series数据 :param subplot: 绘制原始的,log 和 log-log", "''' length = len(df) if num < 1: num = int(length * num)", "-1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf", "* 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre *", "data = data.values density, xdata = np.histogram(data, bins=bins, density=True) xdata = (xdata +", "规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data) xmin = np.min(data)", "= 200 if isinstance(data,pd.Series): data = data.values density, xdata = np.histogram(data, bins=bins, density=True)", "计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者 pandas.Series. 
:return: pandas.Series ''' if data is None:", "distribution_pdf(data) fg = plt.figure() axes = [] for i in range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0],", "data_new = (upper - lower) * (data - xmin) / (xmax - xmin)", "random.sample(inds, num) df_sample = df.loc[ind_sample, :] else: df_sample = df return df_sample def", "len(df) if num < 1: num = int(length * num) inds = list(df.index)", "规范化的数据 ''' xmax = np.max(data) xmin = np.min(data) data_new = (upper - lower)", "data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式", "return data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype:", "filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs = [] for subdir in", "num = int(length * num) inds = list(df.index) if num <= length: ind_sample", ":param subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param", "= [os.path.join(filedir, each + filetype) for each in files] return files return files", "* 读取文件较大的csv - read_csv * 获取当前目录下所有子目录 - get_subdirs * 获取当前目录下所有该类型的文件名 - get_files *", "= normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data = pd.Series(data_normed,name=name) ylabel = 'Prob' if", "lower) * (data - xmin) / (xmax - xmin) + lower return data_new", "== 'abspath': files = [os.path.join(filedir, each + filetype) for each in files] return", "range(subplot): axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0,", "''' files = [] for filename in os.listdir(filedir): if (filetype is None or", "alpha=0.8) return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, 
ndarray等 :param lower:", "''' 目的:提供一些文件操作 和 数据处理的方法 方法: # 功能 # - # 方法 # *", ":return: data_pdf,pandas.Series ''' if data is None: return None if bins is None:", "- - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True chunkSize = 100000 chunks", "''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series ''' if data is None: return", "max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等", "def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data: Series数据 :param subplot: 绘制原始的,log", "用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series ''' if data is None: return None", "abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df, num): ''' 返回dataframe的随机数量的样本,不放回。", "df.loc 试试 ! :param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如 0.2 :return: Dataframe '''", "- get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 -", "-*- ''' 目的:提供一些文件操作 和 数据处理的方法 方法: # 功能 # - # 方法 #", "''' xmax = np.max(data) xmin = np.min(data) data_new = (upper - lower) *", "import numpy as np import matplotlib.pyplot as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv", "log-log :param data_norm: 数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None", "<= length: ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample, :] else: df_sample =", "pd.DataFrame ''' print(' - - start to read - - %s'%readpath) reader =", ":] else: df_sample = df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data:", "os.walk(filedir): if len(each[-1]) >= 1: for file_i in each[-1]: if(filetype is None or", "is None: return None if bins is None: bins = 200 if isinstance(data,pd.Series):", "获取当前目录下所有子目录 - get_subdirs * 
获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样", "data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir:", "data is None: return None if not isinstance(data, pd.Series): data = pd.Series(data) data_count", "计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间", "data_count.sum() return data_p def distribution_pdf(data, bins=None): ''' 用频率密度直方图来估计概率密度分布 :param data: 数据 :return: data_pdf,pandas.Series", "return cdf def plot_distribution(data, subplot=2, data_norm=False, cmp=False, grid=True): ''' :param data: Series数据 :param", "计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 *", "== filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs = [] for subdir", "axes.append(fg.add_subplot(1,subplot,i+1)) data.plot(ax=axes[0], style='*-') axes[0].set_title('Distribution') if subplot>=2: data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50])", "= np.max(data) xmin = np.min(data) data_new = (upper - lower) * (data -", "''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名 或 绝对地址", "in each[-1]: if(filetype is None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files", ":param data: Series数据 :param subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized degree :param", "data = distribution_cdf(data) ylabel = 'CCDF' else: data = distribution_pdf(data) fg = plt.figure()", "num) inds = list(df.index) if num <= length: ind_sample = random.sample(inds, num) df_sample", "distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布 - distribution_fre * 数据归一化到某个区间 - normlize", "read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath :return: 
pd.DataFrame ''' print(' - -", "is None or os.path.splitext(filename)[1] == filetype): files.append(os.path.splitext(filename)[0]) if return_type == 'name': return files", "files] return files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param", "[] for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs", "np.roll(xdata, -1))[:-1] / 2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None):", "- get_subdirs * 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 -", "= [] for filename in os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1] ==", "grid=True): ''' :param data: Series数据 :param subplot: 绘制原始的,log 和 log-log :param data_norm: 数据是否归一化,例如normlized", "filetype: str,文件格式 :return: list ''' files = [] for each in os.walk(filedir): if", "normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh yeah! 
* 2018.11.27", "None or os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs =", "- lower) * (data - xmin) / (xmax - xmin) + lower return", "# 功能 # - # 方法 # * 读取文件较大的csv - read_csv * 获取当前目录下所有子目录", "loop = True chunkSize = 100000 chunks = [] while loop: try: chunk", "cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None ''' if data_norm: data_normed = normlize(data.values,0,1)", "pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf =", "- get_files_all * 数据表随机长度的抽样 - random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 -", "- random_dataframe_sample * 计算概率密度分布 - distribution_pdf * 计算累计概率密度分布 - distribution_cdf * 计算频率分布 -", "import matplotlib.pyplot as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param readpath: filepath :return:", "- distribution_fre * 数据归一化到某个区间 - normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12", ":return: pandas.Series ''' if data is None: return None if not isinstance(data, pd.Series):", "数据是否归一化,例如normlized degree :param cmp: 是否绘制累计密度概率 :param grid: 网格线是否显示 :return: None ''' if data_norm:", "* 数据归一化到某个区间 - normlize 备注: * 2017.10.16 - dataframe_filter方法还需要修改 * 2018.4.12 - 修改完善,oh", "/ 2.0 data_pdf = pd.Series(density, index=xdata) return data_pdf def distribution_cdf(data, bins=None): pdf =", "list(df.index) if num <= length: ind_sample = random.sample(inds, num) df_sample = df.loc[ind_sample, :]", "isinstance(data,pd.Series): data = data.values density, xdata = np.histogram(data, bins=bins, density=True) xdata = (xdata", "df_sample = df return df_sample def distribution_fre(data): ''' 计算数据的频率密度分布,最后的概率值加起来都等于1 :param data: list 或者", "density, xdata = np.histogram(data, bins=bins, density=True) xdata = (xdata + np.roll(xdata, -1))[:-1] /", "样本数量,也可以是比例,例如 0.2 :return: Dataframe ''' length = len(df) if num < 1: num", "try: chunk = reader.get_chunk(chunkSize) 
chunks.append(chunk) except StopIteration: loop = False data = pd.concat(chunks,ignore_index=True)", "None ''' if data_norm: data_normed = normlize(data.values,0,1) name = 'Normlized'+ str(data.name) data =", "each in os.walk(filedir): if len(each[-1]) >= 1: for file_i in each[-1]: if(filetype is", "= np.min(data) data_new = (upper - lower) * (data - xmin) / (xmax", "axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param data: 可以是list,array, ndarray等 :param lower: 规范化的下界 :param", "与 rgb2hex,转到colorfly去了 ''' import os import random import pandas as pd import numpy", "''' files = [] for each in os.walk(filedir): if len(each[-1]) >= 1: for", "data_norm=False, cmp=False, grid=True): ''' :param data: Series数据 :param subplot: 绘制原始的,log 和 log-log :param", "50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name)", "return files return files def get_files_all(filedir,filetype=None): ''' 返回目录和子目录下所以该类型的文件列表 :param filedir: str,目录 :param filetype:", "= 'CCDF' else: data = distribution_pdf(data) fg = plt.figure() axes = [] for", "* 获取当前目录下所有该类型的文件名 - get_files * 获取当前目录和所有子目录下所有该类型的文件名 - get_files_all * 数据表随机长度的抽样 - random_dataframe_sample *", "distribution_cdf(data, bins=None): pdf = distribution_pdf(data, bins) cdf = [] for ind in pdf.index:", "distribution_cdf(data) ylabel = 'CCDF' else: data = distribution_pdf(data) fg = plt.figure() axes =", "filetype=None, return_type='abspath'): ''' 返回当前目录下的所有该类型文件名或地址 :param filedir: str,目录 :param filetype: str,文件格式 :param return_type: 只是文件名", "StopIteration: loop = False data = pd.concat(chunks,ignore_index=True) return data def get_files(filedir, filetype=None, return_type='abspath'):", "- start to read - - %s'%readpath) reader = pd.read_csv(readpath,iterator=True,**kwargs) loop = True", "sub_dirs = [] for subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): 
sub_dirs.append(abs_path)", "data.plot(ax=axes[1], style='*', logy=True, logx=True) axes[1].set_title('log-log') #axes[1].set_xlim([0, 50]) if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log')", "chunks = [] while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration: loop", "return None if not isinstance(data, pd.Series): data = pd.Series(data) data_count = data.value_counts().sort_index() data_p", "numpy as np import matplotlib.pyplot as plt def read_csv(readpath,**kwargs): ''' 分块读取大文件的 csv :param", "绝对地址 :return: list ''' files = [] for filename in os.listdir(filedir): if (filetype", "if num < 1: num = int(length * num) inds = list(df.index) if", "pdf.index: cdf.append(pdf[:ind].sum()) cdf = pd.Series(cdf, index=pdf.index) cdf = cdf / cdf.max() return cdf", "if subplot>=3: data.plot(ax=axes[2], style='*-', logy=True) axes[2].set_title('semi-log') for i in range(subplot): axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0,", "os.path.splitext(file_i)[1] == filetype) files.append(os.path.join(each[0], file_i)) return files def get_subdir(sup_dir): sub_dirs = [] for", "返回dataframe的随机数量的样本,不放回。 如果出现KeyError的话,把下面的 df.ix 改成 df.loc 试试 ! 
:param df: Dataframe,数据 :param num: 样本数量,也可以是比例,例如", "lower: 规范化的下界 :param upper: 规范化的上界 :return: 规范化的数据 ''' xmax = np.max(data) xmin =", "100000 chunks = [] while loop: try: chunk = reader.get_chunk(chunkSize) chunks.append(chunk) except StopIteration:", "axes[i].set_ylabel(ylabel) axes[i].set_xlabel(data.name) axes[i].set_xlim([0, max(data.index)*1.1]) axes[i].grid(grid, alpha=0.8) return axes def normlize(data,lower=0,upper=1): ''' 将数据规范化到某个区间 :param", "[] for filename in os.listdir(filedir): if (filetype is None or os.path.splitext(filename)[1] == filetype):", "if data is None: return None if not isinstance(data, pd.Series): data = pd.Series(data)", "subdir in os.listdir(sup_dir): abs_path = os.path.join(sup_dir,subdir) if os.path.isdir(abs_path): sub_dirs.append(abs_path) return sub_dirs def random_dataframe_sample(df," ]
[ "from yardstick.common import openstack_utils from yardstick.common import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario):", "= self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True)", "= self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent =", "program and the accompanying materials # are made available under the terms of", "= self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait", "self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client()", "rights reserved. 
This program and the accompanying materials # are made available under", "accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from", "terms of the Apache License, Version 2.0 # which accompanies this distribution, and", "self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta", "disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image", "http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils from", "self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout", "def run(self, result): \"\"\"execute the test\"\"\" if not self.setup_done: self.setup() image_id = openstack_utils.create_image(", "wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\")", "others. # # All rights reserved. This program and the accompanying materials #", "self.setup_done = True def run(self, result): \"\"\"execute the test\"\"\" if not self.setup_done: self.setup()", "# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. 
# # All rights", "= self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5", "= self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\",", "self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\")", "the terms of the Apache License, Version 2.0 # which accompanies this distribution,", "############################################################################## import logging from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils from yardstick.common", "sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\":", "the accompanying materials # are made available under the terms of the Apache", "class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg):", "Huawei Technologies Co.,Ltd and others. # # All rights reserved. 
This program and", "if not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256,", "exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split() values = [image_id]", "this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios", "\"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg =", "True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume", "0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys =", "logging from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils from yardstick.common import exceptions", "Apache License, Version 2.0 # which accompanies this distribution, and is available at", "context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"]", "self.container = self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\")", "Technologies Co.,Ltd and others. # # All rights reserved. 
This program and the", "def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def run(self, result): \"\"\"execute the test\"\"\"", "self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\")", "yardstick.common import openstack_utils from yardstick.common import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create", "not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image", "result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys", "meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\":", "scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name =", "an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg", "LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split() values = [image_id] return self._push_to_outputs(keys, values)", "under the terms of the Apache License, Version 2.0 # which accompanies this", "self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume =", "2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ##############################################################################", 
"False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False", "self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True", "result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split() values = [image_id] return", "are made available under the terms of the Apache License, Version 2.0 #", "self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def", "self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client", "which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging", "image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait,", "All rights reserved. 
This program and the accompanying materials # are made available", "the Apache License, Version 2.0 # which accompanies this distribution, and is available", "# http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils", "disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0})", "self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\"", "self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario", "is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios import base from", "failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split() values", "raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split() values =", "self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done =", "allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise 
exceptions.ScenarioCreateImageError", "# # All rights reserved. This program and the accompanying materials # are", "Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0", "test\"\"\" if not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5,", "self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done =", "self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\",", "OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg", "base from yardstick.common import openstack_utils from yardstick.common import exceptions LOG = logging.getLogger(__name__) class", "container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not", "LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\",", "# which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import", "self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, 
disk_format=self.disk_format, container_format=self.container_format,", "self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait =", "made available under the terms of the Apache License, Version 2.0 # which", "self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates", "= self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta =", "= self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def setup(self):", "True def run(self, result): \"\"\"execute the test\"\"\" if not self.setup_done: self.setup() image_id =", "############################################################################## # Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. 
# # All", "= self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates =", "available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios import base from yardstick.common", "accompanying materials # are made available under the terms of the Apache License,", "image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split()", "= self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\")", "self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600)", "= self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done", "reserved. 
This program and the accompanying materials # are made available under the", "timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise", "self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5 =", "scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\")", "openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def run(self,", "True) self.wait = self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False)", "Co.,Ltd and others. # # All rights reserved. This program and the accompanying", "materials # are made available under the terms of the Apache License, Version", "CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg", "result): \"\"\"execute the test\"\"\" if not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name,", "self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent", "# All rights reserved. 
This program and the accompanying materials # are made", "= logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self,", "self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def run(self, result):", "self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container", "self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 =", "\"\"\"execute the test\"\"\" if not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name,", "\"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options =", "\"\"\"scenario setup\"\"\" self.setup_done = True def run(self, result): \"\"\"execute the test\"\"\" if not", "image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create image successful!\")", "License, Version 2.0 # which accompanies this distribution, and is available at #", "__init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name", "exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\"", "self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\", True) self.timeout =", "self.options = self.scenario_cfg[\"options\"] 
self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images')", "setup\"\"\" self.setup_done = True def run(self, result): \"\"\"execute the test\"\"\" if not self.setup_done:", "logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg,", "openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates,", "and others. # # All rights reserved. This program and the accompanying materials", "self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta,", "import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ =", "if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1}) LOG.info(\"Create", "= self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format", "self.options.get(\"wait\", True) self.timeout = self.options.get(\"timeout\", 3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\")", "at # http://www.apache.org/licenses/LICENSE-2.0 
############################################################################## import logging from yardstick.benchmark.scenarios import base from yardstick.common import", "yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils from yardstick.common import exceptions LOG =", "self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent,", "self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume)", "self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\",", "1}) LOG.info(\"Create image successful!\") keys = self.scenario_cfg.get(\"output\", '').split() values = [image_id] return self._push_to_outputs(keys,", "and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios import base", "setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def run(self, result): \"\"\"execute the test\"\"\" if", "not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format,", "= False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def run(self, result): 
\"\"\"execute", "= True def run(self, result): \"\"\"execute the test\"\"\" if not self.setup_done: self.setup() image_id", "'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",)", "container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create", "context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = self.options.get(\"container\",", "image\"\"\" __scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg =", "import openstack_utils from yardstick.common import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an", "= self.options.get(\"volume\") self.shade_client = openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done", "the test\"\"\" if not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container,", "self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name", "available under the terms of the Apache License, Version 2.0 # which accompanies", "def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"]", "= self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container = 
self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256", "from yardstick.common import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\"", "= scenario_cfg self.context_cfg = context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name =", "self.options.get(\"file_name\") self.container = self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format =", "yardstick.common import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__", "= context_cfg self.options = self.scenario_cfg[\"options\"] self.name = self.options[\"image_name\"] self.file_name = self.options.get(\"file_name\") self.container =", "self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True)", "__scenario_type__ = \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg", "distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## import logging from yardstick.benchmark.scenarios import", "and the accompanying materials # are made available under the terms of the", "volume=self.volume) if not image_id: result.update({\"image_create\": 0}) LOG.error(\"Create image failed!\") raise exceptions.ScenarioCreateImageError result.update({\"image_create\": 1})", "from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils from yardstick.common import exceptions LOG", "This program and the accompanying materials # are made 
available under the terms", "= openstack_utils.create_image( self.shade_client, self.name, filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout,", "self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format =", "# are made available under the terms of the Apache License, Version 2.0", "= self.options.get(\"container\", 'images') self.md5 = self.options.get(\"md5\") self.sha256 = self.options.get(\"sha256\") self.disk_format = self.options.get(\"disk_format\") self.container_format", "md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if not image_id:", "LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack image\"\"\" __scenario_type__ = \"CreateImage\" def", "run(self, result): \"\"\"execute the test\"\"\" if not self.setup_done: self.setup() image_id = openstack_utils.create_image( self.shade_client,", "2017 Huawei Technologies Co.,Ltd and others. # # All rights reserved. 
This program", "import base from yardstick.common import openstack_utils from yardstick.common import exceptions LOG = logging.getLogger(__name__)", "self.disk_format = self.options.get(\"disk_format\") self.container_format = self.options.get(\"container_format\",) self.disable_vendor_agent = self.options.get(\"disable_vendor_agent\", True) self.wait = self.options.get(\"wait\",", "openstack_utils from yardstick.common import exceptions LOG = logging.getLogger(__name__) class CreateImage(base.Scenario): \"\"\"Create an OpenStack", "import logging from yardstick.benchmark.scenarios import base from yardstick.common import openstack_utils from yardstick.common import", "= openstack_utils.get_shade_client() self.setup_done = False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def", "(c) 2017 Huawei Technologies Co.,Ltd and others. # # All rights reserved. This", "False def setup(self): \"\"\"scenario setup\"\"\" self.setup_done = True def run(self, result): \"\"\"execute the", "Copyright (c) 2017 Huawei Technologies Co.,Ltd and others. # # All rights reserved.", "3600) self.allow_duplicates = self.options.get(\"allow_duplicates\", False) self.meta = self.options.get(\"meta\") self.volume = self.options.get(\"volume\") self.shade_client =", "of the Apache License, Version 2.0 # which accompanies this distribution, and is", "= \"CreateImage\" def __init__(self, scenario_cfg, context_cfg): self.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.options", "filename=self.file_name, container=self.container, md5=self.md5, sha256=self.sha256, disk_format=self.disk_format, container_format=self.container_format, disable_vendor_agent=self.disable_vendor_agent, wait=self.wait, timeout=self.timeout, allow_duplicates=self.allow_duplicates, meta=self.meta, volume=self.volume) if" ]
[ "Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"],", "self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self):", "= \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body =", "\"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response =", "TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code,", "test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2)))", "_json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\")", "response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\",", "= self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json 
self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def", "\"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields),", "= \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response", "_json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self):", "= \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body", "from unittest.mock import patch import pyipip from dns.exception import DNSException from remote.blueprints.network import", "dns.exception import DNSException from remote.blueprints.network import IPRecord from tests.test_app import TestBase class TestNetwork(TestBase):", "\"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func,", "self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response", "200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def", "test_ip(self): response = 
self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr)", "= self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"])", "pyipip from dns.exception import DNSException from remote.blueprints.network import IPRecord from tests.test_app import TestBase", "= self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def", "self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock", "2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200)", "\"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\",", "def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"],", "= \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json 
self.assertIn(\"success\",", "self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self):", "200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def", "list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"])", "400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func =", "_json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")):", "self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body)", "self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields", "_json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response =", "response.json 
self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func,", "self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400)", "with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body", "self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json", "- 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code,", "test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400)", "import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response", "def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"],", "= response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) 
self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func", "TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response =", "response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code,", "_json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self):", "self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response", "self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func =", "DNSException from remote.blueprints.network import IPRecord from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain =", "self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self):", "side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"])", "200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) 
self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response", "_test_ip = \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json", "404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response", "response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\")", "patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body)", "400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response =", "\"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json", "Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"Unable\",", "_json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body =", "response = self.client.get(f\"/ipip/{self._test_ip}\") 
self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\",", "= response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response", "self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body =", "def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code,", "tests.test_app import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self):", "_json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\"", "self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()),", "= \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body =", "= response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) 
self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\")", "self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\"", "import IPRecord from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip =", "self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str,", "with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not", "= \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with (", "unittest.mock import patch import pyipip from dns.exception import DNSException from remote.blueprints.network import IPRecord", "from remote.blueprints.network import IPRecord from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\"", "self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\",", "<reponame>tssujt/remote from unittest.mock import patch import pyipip from dns.exception import DNSException from remote.blueprints.network", "response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", 
_json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"])", "\"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body = response.json", "return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\",", "self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response", "self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def", "= self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def", "\"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body =", "): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body)", "self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) 
_json_body = response.json self.assertIn(\"nameservers\", _json_body)", "test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\",", "= response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with", "= self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list)", "_test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200)", "self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields))", "\"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response =", "def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"],", "patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\",", "response.request.remote_addr) 
self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\"", "self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock", "= response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with", "_json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func", "def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\",", "\"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\",", "self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"])", "self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self):", 
"patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"])", "found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body", "self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body", "_json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body", "def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) -", "from dns.exception import DNSException from remote.blueprints.network import IPRecord from tests.test_app import TestBase class", "( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body =", "_json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json", "IPRecord from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\"", "response 
= self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"])", "_json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json", "_json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")):", "patch import pyipip from dns.exception import DNSException from remote.blueprints.network import IPRecord from tests.test_app", "response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body) self.assertIn(\"ipv6_available\", _json_body) def test_ipip(self): ipdb_func =", "_json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body =", "self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\",", "response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400)", "mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = 
\"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase),", "test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\")", "side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"])", "self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400)", "_json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body", "= self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertEqual(_json_body[\"ip\"], response.request.remote_addr) self.assertIn(\"ipv4\", _json_body)", "response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func,", "self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response =", "_json_body = response.json self.assertIn(\"not found\", 
_json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response =", "test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404)", "fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ):", "import DNSException from remote.blueprints.network import IPRecord from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain", "self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func", "import pyipip from dns.exception import DNSException from remote.blueprints.network import IPRecord from tests.test_app import", "ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with", "self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json", "self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func", "mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\" with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = 
self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body", "ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body", "= response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code,", "\"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body)", "def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code,", "400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func =", "import patch import pyipip from dns.exception import DNSException from remote.blueprints.network import IPRecord from", "response = self.client.get(\"/ipip/FAKE_IP\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"Invalid\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"])", "_json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self): mock_func = \"remote.blueprints.network.dns.resolver.Resolver.resolve\"", "set(IPRecord._fields)) def test_ipip_error_1(self): 
response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"])", "= \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func,", "self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 404) _json_body = response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def", "self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"missing\", _json_body[\"message\"]) def test_resolve_error_2(self):", "def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"])", "self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\",", "= self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body)", "response.json self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response =", "_json_body) self.assertIn(\"ipv6_available\", _json_body) def 
test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields =", "response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"],", "class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def test_ip(self): response = self.client.get(\"/ip\")", "response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code,", "self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200)", "self.assertFalse(_json_body[\"success\"]) def test_ipip_error_3(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" with patch(ipdb_func, side_effect=Exception(\"Mock Exception\")): response = self.client.get(f\"/ipip/{self._test_ip}\")", "from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip = \"127.0.0.1\" def", "self.assertIn(\"ttl\", _json_body) self.assertIsInstance(_json_body[\"answers\"], list) def test_resolve_error_1(self): response = self.client.get(\"/dns/resolve\") self.assertEqual(response.status_code, 400) _json_body =", "remote.blueprints.network import IPRecord from tests.test_app import TestBase class TestNetwork(TestBase): _test_domain = \"example.com\" _test_ip", "response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) 
_json_body = response.json self.assertEqual(\"error\", _json_body[\"status\"]) self.assertFalse(_json_body[\"success\"]) self.assertIn(\"Unable\", _json_body[\"message\"])", "_json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_ipip_error_2(self): response = self.client.get(\"/ipip/FAKE_IP\")", "self.assertIn(\"success\", _json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\")", "_json_body) self.assertIn(\"continent_code\", _json_body) self.assertIn(\"cn_division_code\", _json_body) self.assertEqual(set(_json_body.keys()), set(IPRecord._fields)) def test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") self.assertEqual(response.status_code,", "range(len(IPRecord._fields) - 2))) with ( patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\")", "test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"nameservers\", _json_body) self.assertIn(\"ttl\", _json_body)", "= response.json self.assertIn(\"not found\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\") self.assertFalse(_json_body[\"success\"]) def test_resolve(self): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\")", "_json_body) def test_ipip(self): ipdb_func = \"remote.blueprints.network._get_ipdb\" mock_func = \"remote.blueprints.network.pyipip.IPIPDatabase.lookup\" fake_ip_fields = \"\\t\".join(map(str, range(len(IPRecord._fields)", "test_ipip_error_1(self): response = self.client.get(\"/ipip/FF:FF\") 
self.assertEqual(response.status_code, 400) _json_body = response.json self.assertIn(\"IPv6\", _json_body[\"message\"]) self.assertEqual(_json_body[\"status\"], \"error\")", "with patch(mock_func, side_effect=DNSException(\"Mock Exception\")): response = self.client.get(f\"/dns/resolve?domain={self._test_domain}\") self.assertEqual(response.status_code, 400) _json_body = response.json self.assertEqual(\"error\",", "patch(ipdb_func, return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json", "return_value=pyipip.IPIPDatabase), patch(mock_func, return_value=fake_ip_fields), ): response = self.client.get(f\"/ipip/{self._test_ip}\") self.assertEqual(response.status_code, 200) _json_body = response.json self.assertIn(\"success\"," ]
[ "ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg: Initialize db instance '''", "class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg: Initialize db instance ''' raise NotImplementedError", "@author: <EMAIL> @last_modified: 2019.6.20 ''' from abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod", "from abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg: Initialize", "abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg: Initialize db", "@last_modified: 2019.6.20 ''' from abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self):", "<filename>api/Mongo/abc_dbops.py<gh_stars>0 '''AbstractDbOps: 数据库操作接口抽象类 @author: <EMAIL> @last_modified: 2019.6.20 ''' from abc import ABC, abstractmethod", "import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg: Initialize db instance", "'''AbstractDbOps: 数据库操作接口抽象类 @author: <EMAIL> @last_modified: 2019.6.20 ''' from abc import ABC, abstractmethod class", "''' from abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg:", "<EMAIL> @last_modified: 2019.6.20 ''' from abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def", "2019.6.20 ''' from abc import ABC, abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化", "数据库操作接口抽象类 @author: <EMAIL> @last_modified: 2019.6.20 ''' from abc import ABC, abstractmethod class AbstractDbOps(ABC):", "abstractmethod class AbstractDbOps(ABC): @abstractmethod def __init__(self): '''初始化 eg: Initialize db instance ''' raise" ]
[ "and hand[0].is_ace() and hand[1].is_ten(): return True if len(hand) == 2 and hand[1].is_ace() and", "human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self):", "self.__str__() def value(self): return self.rank if self.rank < 10 else 10 def is_ace(self):", "we don't bust by take the Ace as an eleven return v+10 return", "def value(self): return self.rank if self.rank < 10 else 10 def is_ace(self): if", "an eleven return v+10 return v def blackjack(hand): if len(hand) == 2 and", "= rank def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit])", "\"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit = suit self.rank = rank", "\"\"\"Returns a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__()", "if self.rank >= 10: return True return False def value(hand): v = sum([c.value()", "<= 11: #if there is an Ace and we don't bust by take", "\"King\"] def __init__(self, suit=0, rank=1): self.suit = suit self.rank = rank def __str__(self):", "c.is_ace()]) > 0 and v <= 11: #if there is an Ace and", "bust by take the Ace as an eleven return v+10 return v def", "= \"0.1\" import random class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names", "'<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\" import random class Card(object): suit_names =", "0 and v <= 11: #if there is an Ace and we don't", "True return False def is_ten(self): if self.rank >= 10: return True return False", "suit self.rank = rank def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return '{}{}", "and v <= 11: #if there is an Ace and we don't bust", "c in hand]) if len([c for c in hand if c.is_ace()]) > 0", 
"self.rank < 10 else 10 def is_ace(self): if self.rank == 1: return True", "self.rank if self.rank < 10 else 10 def is_ace(self): if self.rank == 1:", "return False def value(hand): v = sum([c.value() for c in hand]) if len([c", "hand[0].is_ten(): return True return False if __name__ == '__main__': shoe = Shoe() shoe.shuffle()", "return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self): return self.rank if", "False def is_ten(self): if self.rank >= 10: return True return False def value(hand):", ">= 10: return True return False def value(hand): v = sum([c.value() for c", "return True return False def value(hand): v = sum([c.value() for c in hand])", "v <= 11: #if there is an Ace and we don't bust by", "return v def blackjack(hand): if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten(): return", "blackjack(hand): if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten(): return True if len(hand)", "#encoding: utf8 __author__ = '<NAME>' __email__ = '<EMAIL>' __license__ = \"GPL\" __version__ =", "rank=1): self.suit = suit self.rank = rank def __str__(self): \"\"\"Returns a human-readable string", "self.rank = rank def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank],", "take the Ace as an eleven return v+10 return v def blackjack(hand): if", "def is_ten(self): if self.rank >= 10: return True return False def value(hand): v", "return self.__str__() def value(self): return self.rank if self.rank < 10 else 10 def", "\"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit = suit self.rank = rank def", "= '<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\" import random class Card(object): suit_names", "there is an Ace and we don't bust by take the Ace as", "== 2 and hand[1].is_ace() and hand[0].is_ten(): return True return False if __name__ ==", "rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", 
\"5\", \"6\", \"7\", \"8\", \"9\", \"10\",", "def __repr__(self): return self.__str__() def value(self): return self.rank if self.rank < 10 else", "\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"]", "and hand[1].is_ace() and hand[0].is_ten(): return True return False if __name__ == '__main__': shoe", "c in hand if c.is_ace()]) > 0 and v <= 11: #if there", "True if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten(): return True return False", "\"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",", "rank def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def", "class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\",", "def value(hand): v = sum([c.value() for c in hand]) if len([c for c", "hand[1].is_ace() and hand[0].is_ten(): return True return False if __name__ == '__main__': shoe =", "if self.rank < 10 else 10 def is_ace(self): if self.rank == 1: return", "== 2 and hand[0].is_ace() and hand[1].is_ten(): return True if len(hand) == 2 and", "utf8 __author__ = '<NAME>' __email__ = '<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\"", "the Ace as an eleven return v+10 return v def blackjack(hand): if len(hand)", "= [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", \"5\",", "len([c for c in hand if c.is_ace()]) > 0 and v <= 11:", "def __init__(self, suit=0, rank=1): self.suit = suit self.rank = rank def __str__(self): \"\"\"Returns", "False def value(hand): v = sum([c.value() for c in hand]) if len([c for", "v def blackjack(hand): if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten(): return True", "return self.rank if self.rank < 10 else 10 def is_ace(self): if self.rank ==", "> 0 and v <= 11: #if there is an Ace and we", "return v+10 return v def blackjack(hand): if len(hand) 
== 2 and hand[0].is_ace() and", "\"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1):", "in hand]) if len([c for c in hand if c.is_ace()]) > 0 and", "in hand if c.is_ace()]) > 0 and v <= 11: #if there is", "v = sum([c.value() for c in hand]) if len([c for c in hand", "if c.is_ace()]) > 0 and v <= 11: #if there is an Ace", "an Ace and we don't bust by take the Ace as an eleven", "suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\", \"4\",", "= \"GPL\" __version__ = \"0.1\" import random class Card(object): suit_names = [\"♠️\", \"♣️\",", "return True if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten(): return True return", "if self.rank == 1: return True return False def is_ten(self): if self.rank >=", "\"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0,", "hand]) if len([c for c in hand if c.is_ace()]) > 0 and v", "for c in hand]) if len([c for c in hand if c.is_ace()]) >", "else 10 def is_ace(self): if self.rank == 1: return True return False def", "10: return True return False def value(hand): v = sum([c.value() for c in", "True return False def value(hand): v = sum([c.value() for c in hand]) if", "hand[1].is_ten(): return True if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten(): return True", "hand if c.is_ace()]) > 0 and v <= 11: #if there is an", "'.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self): return self.rank if self.rank <", "suit=0, rank=1): self.suit = suit self.rank = rank def __str__(self): \"\"\"Returns a human-readable", "random class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\",", "= '<NAME>' __email__ = '<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\" import random", "and hand[1].is_ten(): return True if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten(): 
return", "#if there is an Ace and we don't bust by take the Ace", "self.suit = suit self.rank = rank def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\"", "by take the Ace as an eleven return v+10 return v def blackjack(hand):", "is_ten(self): if self.rank >= 10: return True return False def value(hand): v =", "== 1: return True return False def is_ten(self): if self.rank >= 10: return", "Ace as an eleven return v+10 return v def blackjack(hand): if len(hand) ==", "[None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\",", "\"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit = suit", "representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self): return self.rank", "def is_ace(self): if self.rank == 1: return True return False def is_ten(self): if", "v+10 return v def blackjack(hand): if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten():", "__author__ = '<NAME>' __email__ = '<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\" import", "Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self): return self.rank if self.rank < 10", "2 and hand[1].is_ace() and hand[0].is_ten(): return True return False if __name__ == '__main__':", "def blackjack(hand): if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten(): return True if", "\"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit =", "10 def is_ace(self): if self.rank == 1: return True return False def is_ten(self):", "\"GPL\" __version__ = \"0.1\" import random class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\",", "if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten(): return True return False if", "is_ace(self): if self.rank == 1: return True return False def is_ten(self): if self.rank", "and 
hand[0].is_ten(): return True return False if __name__ == '__main__': shoe = Shoe()", "as an eleven return v+10 return v def blackjack(hand): if len(hand) == 2", "\"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit = suit self.rank", "1: return True return False def is_ten(self): if self.rank >= 10: return True", "\"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",", "= sum([c.value() for c in hand]) if len([c for c in hand if", "__license__ = \"GPL\" __version__ = \"0.1\" import random class Card(object): suit_names = [\"♠️\",", "= suit self.rank = rank def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return", "'{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self): return self.rank if self.rank", "import random class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None,", "\"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit = suit self.rank =", "a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def", "string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return self.__str__() def value(self): return", "self.rank >= 10: return True return False def value(hand): v = sum([c.value() for", "return True return False def is_ten(self): if self.rank >= 10: return True return", "don't bust by take the Ace as an eleven return v+10 return v", "return True return False if __name__ == '__main__': shoe = Shoe() shoe.shuffle() print(shoe.deal_cards(2))", "__init__(self, suit=0, rank=1): self.suit = suit self.rank = rank def __str__(self): \"\"\"Returns a", "return False def is_ten(self): if self.rank >= 10: return True return False def", "value(hand): v 
= sum([c.value() for c in hand]) if len([c for c in", "def __str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self):", "Ace and we don't bust by take the Ace as an eleven return", "11: #if there is an Ace and we don't bust by take the", "self.rank == 1: return True return False def is_ten(self): if self.rank >= 10:", "__repr__(self): return self.__str__() def value(self): return self.rank if self.rank < 10 else 10", "eleven return v+10 return v def blackjack(hand): if len(hand) == 2 and hand[0].is_ace()", "value(self): return self.rank if self.rank < 10 else 10 def is_ace(self): if self.rank", "len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten(): return True if len(hand) == 2", "for c in hand if c.is_ace()]) > 0 and v <= 11: #if", "2 and hand[0].is_ace() and hand[1].is_ten(): return True if len(hand) == 2 and hand[1].is_ace()", "[\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\",", "\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self,", "10 else 10 def is_ace(self): if self.rank == 1: return True return False", "'<NAME>' __email__ = '<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\" import random class", "= [None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\",", "< 10 else 10 def is_ace(self): if self.rank == 1: return True return", "and we don't bust by take the Ace as an eleven return v+10", "sum([c.value() for c in hand]) if len([c for c in hand if c.is_ace()])", "__str__(self): \"\"\"Returns a human-readable string representation.\"\"\" return '{}{} '.format(Card.rank_names[self.rank], Card.suit_names[self.suit]) def __repr__(self): return", "if len(hand) == 2 and hand[0].is_ace() and hand[1].is_ten(): return True if len(hand) ==", "__email__ = '<EMAIL>' __license__ = \"GPL\" __version__ = \"0.1\" import 
random class Card(object):", "\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def", "hand[0].is_ace() and hand[1].is_ten(): return True if len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten():", "\"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] def __init__(self, suit=0, rank=1): self.suit", "\"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\",", "if len([c for c in hand if c.is_ace()]) > 0 and v <=", "\"0.1\" import random class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names =", "len(hand) == 2 and hand[1].is_ace() and hand[0].is_ten(): return True return False if __name__", "__version__ = \"0.1\" import random class Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"]", "is an Ace and we don't bust by take the Ace as an", "Card(object): suit_names = [\"♠️\", \"♣️\", \"♦️\", \"♥️\"] rank_names = [None, \"Ace\", \"2\", \"3\"," ]
[ "pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1 # break arquivo =", "arquivo.close() maior_idade = [] contador_maior = 0 for pessoa in lista_cadastro: if int(pessoa[2])", "in lista_cadastro: # if pessoa[0].upper() == '300': # print(pessoa) # break # for", "for pessoa in lista_cadastro: # if pessoa[0].upper() == '300': # print(pessoa) # break", "= 0 for pessoa in lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior =", "um arquivo # \"a\" append - adiciona novas informações import matplotlib.pyplot as plt", "int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor = contador_menor + 1 # break arquivo =", "for pessoa in lista_cadastro: # if pessoa[3].upper() == 'F': # print(pessoa) # #", "in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400) y", "import matplotlib.pyplot as plt import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro =", "np.pi,400) y = np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher)", "- adiciona novas informações import matplotlib.pyplot as plt import numpy as np arquivo", "lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1 # break", "arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior = 0 for pessoa in lista_cadastro: if", "[] contador_maior = 0 for pessoa in lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa)", "arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens = 0 for pessoa in lista_cadastro: if", "for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor", "arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # 
for pessoa in", "pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens = 0 for pessoa", "';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400) y = np.sin(x ** 2)", "in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor = 0", "break mulher = [] contador_mulher = 0 for pessoa in lista_cadastro: if pessoa[3]", "for pessoa in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher +", "# for pessoa in lista_cadastro: # if pessoa[0].upper() == '300': # print(pessoa) #", "<reponame>MateusPeschke/CursoPython # \"r\" read - abre o arquivo # \"w\" write - cria", "# break # for pessoa in lista_cadastro: # if not ('0' in pessoa[0])", "('0' in pessoa[0]) and not ('A' in pessoa[1]): # print(pessoa) # # break", "pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior =", "lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro: # if pessoa[0].upper() == '300': #", "in pessoa[1]): # print(pessoa) # # break mulher = [] contador_mulher = 0", "lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor = contador_menor + 1 # break", "append - adiciona novas informações import matplotlib.pyplot as plt import numpy as np", "matplotlib.pyplot as plt import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = []", "# break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str)", ">= 18: maior_idade.append(pessoa) contador_maior = contador_maior + 1 # break arquivo = 
open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w')", "== 'm': homens.append(pessoa) contador_homens = contador_homens + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w')", "1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\"", "for pessoa in lista_cadastro: # if not ('0' in pessoa[0]) and not ('A'", "0 for pessoa in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens", "np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor')", "quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade = (contador_maior,contador_menor) plt.subplot(2,1,1) plt.bar(genero,quantidade_genero) plt.subplot(2,1,2) plt.bar(Idade,quantidade_idade)", "in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens = 0", "break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close()", "read - abre o arquivo # \"w\" write - cria ou substitui um", "== 'F': # print(pessoa) # # break # for pessoa in lista_cadastro: #", "- cria ou substitui um arquivo # \"a\" append - adiciona novas informações", "print(pessoa) # break # for pessoa in lista_cadastro: # if pessoa[3].upper() == 'F':", "'F': # print(pessoa) # # break # for pessoa in lista_cadastro: # if", "contador_mulher + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str", 
"open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = []", "pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400)", "int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior = contador_maior + 1 # break arquivo =", "# # break mulher = [] contador_mulher = 0 for pessoa in lista_cadastro:", "genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade = (contador_maior,contador_menor)", "pessoa in lista_cadastro: # if pessoa[3].upper() == 'F': # print(pessoa) # # break", "lista_cadastro: # if pessoa[3].upper() == 'F': # print(pessoa) # # break # for", "if pessoa[0].upper() == '300': # print(pessoa) # break # for pessoa in lista_cadastro:", "= open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x =", "# print(pessoa) # break # for pessoa in lista_cadastro: # if pessoa[3].upper() ==", "arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in arquivo: pessoa = pessoa.strip()", "\"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade = (contador_maior,contador_menor) plt.subplot(2,1,1) plt.bar(genero,quantidade_genero) plt.subplot(2,1,2)", "in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1 #", "\"r\" read - abre o arquivo # \"w\" write - cria ou substitui", "arquivo # \"w\" write - cria ou substitui um arquivo # \"a\" append", "= 
open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade =", "= ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor = 0 for pessoa in", "+ 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str =", "= contador_homens + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens:", "arquivo.close() menor_idade = [] contador_menor = 0 for pessoa in lista_cadastro: if int(pessoa[2])", "# if pessoa[0].upper() == '300': # print(pessoa) # break # for pessoa in", "arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens", "np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in arquivo: pessoa =", "pessoa in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens + 1", "# \"a\" append - adiciona novas informações import matplotlib.pyplot as plt import numpy", "+ 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str =", "maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor = 0 for", "arquivo.close() homens = [] contador_homens = 0 for pessoa in lista_cadastro: if pessoa[3]", "# if not ('0' in pessoa[0]) and not ('A' in pessoa[1]): # print(pessoa)", "o arquivo # \"w\" 
write - cria ou substitui um arquivo # \"a\"", "= pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro: #", "np.linspace(0,2 * np.pi,400) y = np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero", "pessoa in lista_cadastro: # if pessoa[0].upper() == '300': # print(pessoa) # break #", "< 18: menor_idade.append(pessoa) contador_menor = contador_menor + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w')", "';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor = 0 for pessoa in lista_cadastro:", "pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens + 1 # break arquivo =", "mulher = [] contador_mulher = 0 for pessoa in lista_cadastro: if pessoa[3] ==", "homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior = 0 for", "pessoa[0].upper() == '300': # print(pessoa) # break # for pessoa in lista_cadastro: #", "print(pessoa) # # break mulher = [] contador_mulher = 0 for pessoa in", "open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = []", "contador_menor = contador_menor + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in", "[] contador_homens = 0 for pessoa in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa)", "arquivo # \"a\" append - adiciona novas informações import matplotlib.pyplot as plt import", "lista_cadastro: # if pessoa[0].upper() == '300': # print(pessoa) # break # for pessoa", "';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens = 0 for pessoa in 
lista_cadastro:", "arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade", "arquivo.close() # for pessoa in lista_cadastro: # if pessoa[0].upper() == '300': # print(pessoa)", "mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens = 0 for", "= 0 for pessoa in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor =", "contador_maior = contador_maior + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in", "pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor = 0 for pessoa", "as plt import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for", "open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in arquivo: pessoa = pessoa.strip() pessoa =", "'m': homens.append(pessoa) contador_homens = contador_homens + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for", "in lista_cadastro: # if pessoa[3].upper() == 'F': # print(pessoa) # # break #", "# # break # for pessoa in lista_cadastro: # if not ('0' in", "mulher.append(pessoa) contador_mulher = contador_mulher + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa", "break # for pessoa in lista_cadastro: # if not ('0' in pessoa[0]) and", "= [] contador_menor = 0 for pessoa in lista_cadastro: if int(pessoa[2]) < 18:", 
"menor_idade.append(pessoa) contador_menor = contador_menor + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa", "= [] for pessoa in arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa)", "= [] contador_mulher = 0 for pessoa in lista_cadastro: if pessoa[3] == 'f':", "if pessoa[3].upper() == 'F': # print(pessoa) # # break # for pessoa in", "ou substitui um arquivo # \"a\" append - adiciona novas informações import matplotlib.pyplot", "= ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400) y = np.sin(x **", "== 'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w')", "= ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens = 0 for pessoa in", "novas informações import matplotlib.pyplot as plt import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\")", "= contador_menor + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade:", "for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens", "18: maior_idade.append(pessoa) contador_maior = contador_maior + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for", "= np.linspace(0,2 * np.pi,400) y = np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\")", "pessoa in lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior = contador_maior + 1", "maior_idade.append(pessoa) contador_maior = contador_maior + 1 # 
break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa", "== '300': # print(pessoa) # break # for pessoa in lista_cadastro: # if", "for pessoa in arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() #", "plt import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa", "substitui um arquivo # \"a\" append - adiciona novas informações import matplotlib.pyplot as", "# \"r\" read - abre o arquivo # \"w\" write - cria ou", "lista_cadastro: # if not ('0' in pessoa[0]) and not ('A' in pessoa[1]): #", "not ('0' in pessoa[0]) and not ('A' in pessoa[1]): # print(pessoa) # #", "contador_mulher = contador_mulher + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in", "1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\"", "= 0 for pessoa in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens =", "arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade", "contador_maior + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str", "in pessoa[0]) and not ('A' in pessoa[1]): # print(pessoa) # # break mulher", "contador_maior = 0 for pessoa in lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior", "in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) 
contador_menor = contador_menor + 1 #", "for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior", "1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\"", "x = np.linspace(0,2 * np.pi,400) y = np.sin(x ** 2) genero = (\"Homens\",", "# for pessoa in lista_cadastro: # if not ('0' in pessoa[0]) and not", "maior_idade = [] contador_maior = 0 for pessoa in lista_cadastro: if int(pessoa[2]) >=", "pessoa[0]) and not ('A' in pessoa[1]): # print(pessoa) # # break mulher =", "pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro: # if", "if not ('0' in pessoa[0]) and not ('A' in pessoa[1]): # print(pessoa) #", "= 0 for pessoa in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher =", "import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in", "* np.pi,400) y = np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero =", "lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens + 1 # break", "'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for", "contador_mulher = 0 for pessoa in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher", "pessoa in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor = contador_menor + 1", "[] contador_menor = 0 for pessoa in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa)", "+ 1 # break arquivo = 
open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str =", "[] contador_mulher = 0 for pessoa in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa)", "- abre o arquivo # \"w\" write - cria ou substitui um arquivo", "= ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior = 0 for pessoa in", "for pessoa in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens +", "# break mulher = [] contador_mulher = 0 for pessoa in lista_cadastro: if", "1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\"", "0 for pessoa in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher", "if pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens + 1 # break arquivo", "open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2", "'300': # print(pessoa) # break # for pessoa in lista_cadastro: # if pessoa[3].upper()", "pessoa[1]): # print(pessoa) # # break mulher = [] contador_mulher = 0 for", "informações import matplotlib.pyplot as plt import numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro", "write - cria ou substitui um arquivo # \"a\" append - adiciona novas", "if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1 # break arquivo", "arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400) y = np.sin(x ** 2) genero", "';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = 
[] contador_maior = 0 for pessoa in lista_cadastro:", "pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior = 0 for pessoa", "pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400) y = np.sin(x", "= pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro: # if pessoa[0].upper() ==", "('A' in pessoa[1]): # print(pessoa) # # break mulher = [] contador_mulher =", "homens = [] contador_homens = 0 for pessoa in lista_cadastro: if pessoa[3] ==", "homens.append(pessoa) contador_homens = contador_homens + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa", "open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = []", "if int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor = contador_menor + 1 # break arquivo", "18: menor_idade.append(pessoa) contador_menor = contador_menor + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for", "= (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade = (contador_maior,contador_menor) plt.subplot(2,1,1) plt.bar(genero,quantidade_genero) plt.subplot(2,1,2) plt.bar(Idade,quantidade_idade) plt.show()", "contador_menor + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str", "cria ou substitui um arquivo # \"a\" append - adiciona novas informações import", "# for pessoa in lista_cadastro: # if pessoa[3].upper() == 'F': # print(pessoa) #", "in lista_cadastro: if int(pessoa[2]) >= 18: 
maior_idade.append(pessoa) contador_maior = contador_maior + 1 #", "break # for pessoa in lista_cadastro: # if pessoa[3].upper() == 'F': # print(pessoa)", "contador_menor = 0 for pessoa in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor", "= [] contador_homens = 0 for pessoa in lista_cadastro: if pessoa[3] == 'm':", "break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close()", "= [] contador_maior = 0 for pessoa in lista_cadastro: if int(pessoa[2]) >= 18:", "and not ('A' in pessoa[1]): # print(pessoa) # # break mulher = []", "** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade", "lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior = contador_maior + 1 # break", "if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior = contador_maior + 1 # break arquivo", "# break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str)", "= (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade = (contador_maior,contador_menor) plt.subplot(2,1,1)", "in arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa", "abre o arquivo # \"w\" write - cria ou substitui um arquivo #", "[] for pessoa in arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close()", "not ('A' in pessoa[1]): # print(pessoa) # # break mulher = [] contador_mulher", "for pessoa in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) 
contador_menor = contador_menor +", "pessoa[3].upper() == 'F': # print(pessoa) # # break # for pessoa in lista_cadastro:", "as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in arquivo: pessoa", "# break # for pessoa in lista_cadastro: # if pessoa[3].upper() == 'F': #", "pessoa in lista_cadastro: if pessoa[3] == 'f': mulher.append(pessoa) contador_mulher = contador_mulher + 1", "print(pessoa) # # break # for pessoa in lista_cadastro: # if not ('0'", "# break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str)", "= contador_maior + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade:", "arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor = 0 for pessoa in lista_cadastro: if", "\"w\" write - cria ou substitui um arquivo # \"a\" append - adiciona", "numpy as np arquivo = open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in arquivo:", "lista_cadastro = [] for pessoa in arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';')", "pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro:", "in homens: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() maior_idade = [] contador_maior = 0", "0 for pessoa in lista_cadastro: if int(pessoa[2]) < 18: menor_idade.append(pessoa) contador_menor = contador_menor", "arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\menor_idade.txt','w') for pessoa in menor_idade: pessoa_str = 
';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x", "pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro: # if pessoa[0].upper() == '300':", "# if pessoa[3].upper() == 'F': # print(pessoa) # # break # for pessoa", "= contador_mulher + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher:", "0 for pessoa in lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior = contador_maior", "# print(pessoa) # # break # for pessoa in lista_cadastro: # if not", "pessoa in arquivo: pessoa = pessoa.strip() pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for", "menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 * np.pi,400) y =", "in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens = contador_homens + 1 #", "y = np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade", "pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens = [] contador_homens =", "for pessoa in menor_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() x = np.linspace(0,2 *", "arquivo.close() x = np.linspace(0,2 * np.pi,400) y = np.sin(x ** 2) genero =", "(\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade = (contador_maior,contador_menor) plt.subplot(2,1,1) plt.bar(genero,quantidade_genero)", "+ 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str =", "2) genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade = ('maior','menor') quantidade_idade =", "# 
print(pessoa) # # break mulher = [] contador_mulher = 0 for pessoa", "break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close()", "# \"w\" write - cria ou substitui um arquivo # \"a\" append -", "for pessoa in lista_cadastro: if int(pessoa[2]) >= 18: maior_idade.append(pessoa) contador_maior = contador_maior +", "contador_homens = contador_homens + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in", "= open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() homens =", "pessoa in lista_cadastro: # if not ('0' in pessoa[0]) and not ('A' in", "contador_homens = 0 for pessoa in lista_cadastro: if pessoa[3] == 'm': homens.append(pessoa) contador_homens", "pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade = [] contador_menor =", "= open(r\"C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\cadastro.txt\",\"r\") lista_cadastro = [] for pessoa in arquivo: pessoa = pessoa.strip() pessoa", "contador_homens + 1 # break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\homens.txt','w') for pessoa in homens: pessoa_str", "# break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str)", "adiciona novas informações import matplotlib.pyplot as plt import numpy as np arquivo =", "= 
open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\maior_idade.txt','w') for pessoa in maior_idade: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close() menor_idade =", "pessoa = pessoa.split(';') lista_cadastro.append(pessoa) arquivo.close() # for pessoa in lista_cadastro: # if pessoa[0].upper()", "= np.sin(x ** 2) genero = (\"Homens\", \"Mulheres\") quantidade_genero = (contador_homens,contador_mulher) Idade =", "\"a\" append - adiciona novas informações import matplotlib.pyplot as plt import numpy as", "break arquivo = open(r'C:\\Users\\67184\\Documents\\Desenvolvimento_Agil_em_Python_1_2020\\aula4\\exemplos\\mulher.txt','w') for pessoa in mulher: pessoa_str = ';'.join(pessoa)+\"\\n\" arquivo.write(pessoa_str) arquivo.close()", "in lista_cadastro: # if not ('0' in pessoa[0]) and not ('A' in pessoa[1]):", "menor_idade = [] contador_menor = 0 for pessoa in lista_cadastro: if int(pessoa[2]) <" ]
[ "reduction, channel), nn.Sigmoid() ) def forward(self, x): b, c, _, _ = x.size()", "__init__(self, block, layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se = use_se super(ResNet, self).__init__()", "use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x", "x = self.resnet(images) # [N, 2048, 1, 1] x = self.dropout(x) x =", "== 'r34': self.resnet = resnet34(args) elif args.network == 'r50': self.resnet = resnet50(args) elif", "model def resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs)", "= self.relu(h) g = self.unpool2(h) # bs 128 w/8 h/8 c = self.conv3(torch.cat((g,", "= self.prelu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes", "# [N, 2048] x = self.fc(x) reg = self.sigmoid(x[:, :5]) # [N, 8]", "use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model = ResNet(IRBlock,", "bs 1 w/4 h/4 score = self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map)", "stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2)", "= self.conv1(out) out = self.bn1(out) out = self.prelu(out) out = self.conv2(out) out =", "phi) + ((1.0 - one_hot) * cosine) output *= self.s return output from", "[N, 2048] x = self.fc(x) reg = self.sigmoid(x[:, :5]) # [N, 8] expression", "self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2,", "x): x = self.conv1(x) x = self.bn1(x) x = self.prelu(x) x = self.maxpool(x)", "phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)", "out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = 
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 =", "self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2", "model def resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs)", "models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear", "self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 =", "self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 =", "model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4, 23, 3],", "8:10]) glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return reg, expression, gender,", "stride=1): downsample = None if stride != 1 or self.inplanes != planes *", "elif args.network == 'r34': self.resnet = resnet34(args) elif args.network == 'r50': self.resnet =", "__init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 =", "ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se = use_se", "self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64,", "self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi -", "self.resnet(images) h = f[3] # bs 2048 w/32 h/32 g = (self.unpool1(h)) #", "out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = 
nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3", "nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool =", "= self.bn3(c) c = self.relu(c) h = self.conv4(c) # bs 64 w/8 h/8", "nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32,", "= self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x)", "class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear and", "import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import Parameter from", "def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel,", "race = self.softmax(x[:, 13:17]) return reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module): def", "nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu =", "def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,", "w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c = self.relu(c)", "as model_zoo from torch import nn from torch.nn import Parameter from config import", "['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',", "residual out = self.prelu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, use_se=True,", "x = self.fc(x) x = self.bn3(x) return x def resnet18(args, **kwargs): model =", "'r101': 
self.resnet = resnet101(args) else: # args.network == 'r152': self.resnet = resnet152(args) self.relu", "super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear and pool layers (since we're", "= models.resnet50(pretrained=True) # Remove linear and pool layers (since we're not doing classification)", "x = F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x, W) sine = torch.sqrt(1.0", "2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model", "7) self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) # [N, 2048,", "h = self.conv2(c) # bs 128 w/16 h/16 h = self.bn2(h) h =", "self.resnet = resnet50(args) elif args.network == 'r101': self.resnet = resnet101(args) else: # args.network", "Parameter from config import device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',", "self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride", "padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4,", "angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map - 0.5) * math.pi", "downsample, use_se=self.use_se)) self.inplanes = planes for i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se))", "= nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self, images): x = self.resnet(images) #", "2)) phi = cosine * self.cos_m - sine * self.sin_m # cos(theta +", "= math.sin(self.m) self.th = math.cos(math.pi - self.m) self.mm = math.sin(math.pi - self.m) *", "= self.dropout(x) x = self.avgpool(x) x = x.view(-1, 2048) # [N, 2048] x", "model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model 
def resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8, 36, 3],", "0.5) * math.pi / 2 geo = torch.cat((geo_map, angle_map), 1) # bs 5", "g = self.bn7(g) g = self.relu(g) score = self.conv8(g) # bs 1 w/4", "self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c = self.relu(c) h = self.conv6(c) #", "self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7)", "w/4 h/4 score = self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) * 512", "nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,", "ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def", "self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256,", "= nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool", "out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if", "downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2", "self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return reg, expression, gender, glasses, race class", "torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi) + ((1.0", "nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or", "self.dropout(x) x = x.view(x.size(0), -1) x = self.fc(x) x = self.bn3(x) return x", "if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential(", "__init__(self, args): 
super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m", "= self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out =", "self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine >", "= self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes,", "nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2", "256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout", "= self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x =", "doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid", "reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel // reduction),", "= self.fc(x) reg = self.sigmoid(x[:, :5]) # [N, 8] expression = self.softmax(x[:, 5:8])", "with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion =", "planes for i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self,", "def forward(self, x): residual = x out = self.bn0(x) out = self.conv1(out) out", "self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2", "forward(self, images): x = self.resnet(images) # [N, 2048, 1, 1] x = x.view(-1,", "SEBlock(planes) def forward(self, x): residual = x out = self.bn0(x) out = self.conv1(out)", 
"layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x =", "f[3] # bs 2048 w/32 h/32 g = (self.unpool1(h)) # bs 2048 w/16", "self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample =", "= downsample self.stride = stride def forward(self, x): residual = x out =", "self.relu(c) h = self.conv2(c) # bs 128 w/16 h/16 h = self.bn2(h) h", "stride, downsample, use_se=self.use_se)) self.inplanes = planes for i in range(1, blocks): layers.append(block(self.inplanes, planes,", "= self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2 =", "self.fc(x) reg = self.sigmoid(x[:, :5]) # [N, 8] expression = self.softmax(x[:, 5:8]) gender", "nn.Dropout() if im_size == 112: self.fc = nn.Linear(512 * 7 * 7, 512)", "stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class", "is not None: residual = self.downsample(x) out += residual out = self.prelu(out) return", "self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size == 112: self.fc = nn.Linear(512", "= nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1)", "x = self.conv1(x) x = self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x", "__init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear layer (since we're not", "model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3],", "1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes *", "= self.fc(x) out = self.softmax(x) return out class 
EastModel(nn.Module): def __init__(self, args): super(EastModel,", "= self.downsample(x) out += residual out = self.prelu(out) return out class ResNet(nn.Module): def", "[2, 2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args,", "nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)", "'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\"", "2048 w/32 h/32 g = (self.unpool1(h)) # bs 2048 w/16 h/16 c =", "14 * 14, 512) self.bn3 = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m,", "if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4,", "**kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained:", "kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)", "from torch import nn from torch.nn import Parameter from config import device, num_classes", "args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6,", "layers (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc", "use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model = ResNet(IRBlock,", "== 'r18': self.resnet = 
resnet18(args) elif args.network == 'r34': self.resnet = resnet34(args) elif", "__init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1", "self.sin_m # cos(theta + m) if self.easy_margin: phi = torch.where(cosine > 0, phi,", "kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4,", "= math.sin(math.pi - self.m) * self.m def forward(self, input, label): x = F.normalize(input)", "= nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def", "return model def resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se,", "kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64)", "resnet = models.resnet50(pretrained=True) # Remove linear layer (since we're not doing classification) modules", "**kwargs): model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))", "self.bn2(h) h = self.relu(h) g = self.unpool2(h) # bs 128 w/8 h/8 c", "= cosine * self.cos_m - sine * self.sin_m # cos(theta + m) if", "**kwargs): model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))", "class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin", "64 w/8 h/8 h = self.bn4(h) h = self.relu(h) g = self.unpool3(h) #", "'r152': self.resnet = resnet152(args) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072,", "self.modules(): if isinstance(m, 
nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1)", "elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear):", "self.dropout = nn.Dropout() if im_size == 112: self.fc = nn.Linear(512 * 7 *", "if args.network == 'r18': self.resnet = resnet18(args) elif args.network == 'r34': self.resnet =", "stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se = use_se", "if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine", "F.normalize(self.weight) cosine = F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi =", "= x.view(x.size(0), -1) x = self.fc(x) x = self.bn3(x) return x def resnet18(args,", "self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes,", "out += residual out = self.relu(out) return out class SEBlock(nn.Module): def __init__(self, channel,", "f = self.resnet(images) h = f[3] # bs 2048 w/32 h/32 g =", "112: self.fc = nn.Linear(512 * 7 * 7, 512) else: # 224 self.fc", "-1) x = self.fc(x) x = self.bn3(x) return x def resnet18(args, **kwargs): model", "= nn.Sigmoid() def forward(self, images): x = self.resnet(images) # [N, 2048, 1, 1]", "layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for i in range(1, blocks):", "layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se = use_se super(ResNet, self).__init__() self.conv1 =", "= self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 =", "if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 
4,", "= nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear',", "= F.normalize(self.weight) cosine = F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi", "self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1)", "1 w/4 h/4 score = self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) *", "self.mm = math.sin(math.pi - self.m) * self.m def forward(self, input, label): x =", "= nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64,", "nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block,", "# cos(theta + m) if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine)", "nn.Sigmoid() ) def forward(self, x): b, c, _, _ = x.size() y =", "__init__(self, args): super(EastModel, self).__init__() if args.network == 'r18': self.resnet = resnet18(args) elif args.network", "= nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual =", "x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x", "planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes)", "out = self.prelu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112):", "return x * y class IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes,", "Remove linear layer (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet =", "resnet18(args, **kwargs): model = 
ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained:", "class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc =", "1) output = (one_hot * phi) + ((1.0 - one_hot) * cosine) output", "= args.easy_margin self.m = args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m =", "nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x", "def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out", "nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif", "self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3,", "= self.relu(h) g = self.conv7(h) # bs 32 w/4 h/4 g = self.bn7(g)", "= self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map = self.conv10(g)", "out = self.conv2(out) out = self.bn2(out) if self.use_se: out = self.se(out) if self.downsample", "3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model", "conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,", "self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1,", "models.resnet50(pretrained=True) # Remove linear layer (since we're not doing classification) modules = list(resnet.children())[:-1]", "self.bn3 = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif 
isinstance(m,", "'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101':", "layers (since we're not doing classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout", "'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return", "self.unpool3(h) # bs 64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c =", "conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self,", "> 0, phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine -", "out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1", "in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight,", "torch.nn import Parameter from config import device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34',", "out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 =", "= nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 =", "if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4,", "# bs 128 w/16 h/16 h = self.bn2(h) h = self.relu(h) g =", "+= residual out 
= self.prelu(out) return out class ResNet(nn.Module): def __init__(self, block, layers,", "return reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet", "[3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def", "1) return x * y class IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes,", "forward(self, images): _, f = self.resnet(images) h = f[3] # bs 2048 w/32", "self.bn2(out) if self.use_se: out = self.se(out) if self.downsample is not None: residual =", "self.inplanes = planes for i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers)", "__init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear and pool layers (since", "return x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove", "out += residual out = self.prelu(out) return out class ResNet(nn.Module): def __init__(self, block,", "64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2],", "nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)", "nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride", "'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes,", "x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) x = self.sigmoid(x)", 
"self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual", "23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs): model", "= self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return reg,", "self.relu(g) score = self.conv8(g) # bs 1 w/4 h/4 score = self.sigmoid(score) geo_map", "36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self,", "torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn", "self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1)", "# bs 32 w/4 h/4 h = self.bn6(h) h = self.relu(h) g =", "out class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if args.network == 'r18': self.resnet", "= 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes,", "out = self.prelu(out) out = self.conv2(out) out = self.bn2(out) if self.use_se: out =", "= nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), )", "= nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear',", "nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32,", "is not None: residual = self.downsample(x) out += residual out = self.relu(out) return", "torchvision import models class 
FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) #", "self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32,", "num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18':", "self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 =", "self.fc(x) x = self.bn3(x) return x def resnet18(args, **kwargs): model = ResNet(IRBlock, [2,", "if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias,", "= self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample", "self.relu(h) g = self.unpool3(h) # bs 64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]),", "= self.bn3(x) return x def resnet18(args, **kwargs): model = ResNet(IRBlock, [2, 2, 2,", "self.fc(x) x = self.sigmoid(x) # [N, 8] return x class FaceAttributeModel(nn.Module): def __init__(self):", "= nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes,", "* phi) + ((1.0 - one_hot) * cosine) output *= self.s return output", "inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 =", "stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample,", "resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained:", "self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi - 
self.m) self.mm =", "if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight", "layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size == 112: self.fc", "bs 32 w/4 h/4 g = self.bn7(g) g = self.relu(g) score = self.conv8(g)", "self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out)", "= nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride", "(one_hot * phi) + ((1.0 - one_hot) * cosine) output *= self.s return", "0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride !=", "import Parameter from config import device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50',", "out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out", "nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample", "downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu =", "4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs):", "residual = self.downsample(x) out += residual out = self.prelu(out) return out class ResNet(nn.Module):", "g = self.unpool3(h) # bs 64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1))", "self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear and pool layers (since we're not", "stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1,", "def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1", "c = 
self.relu(c) h = self.conv4(c) # bs 64 w/8 h/8 h =", "x.view(x.size(0), -1) x = self.fc(x) x = self.bn3(x) return x def resnet18(args, **kwargs):", "nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images): x =", "x = self.fc(x) reg = self.sigmoid(x[:, :5]) # [N, 8] expression = self.softmax(x[:,", "**kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__()", "= self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c = self.relu(c) h = self.conv6(c)", "phi = torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine > self.th,", "y = self.fc(y).view(b, c, 1, 1) return x * y class IRBlock(nn.Module): expansion", "self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return x * y class", "g = self.conv7(h) # bs 32 w/4 h/4 g = self.bn7(g) g =", "use_se=True, im_size=112): self.inplanes = 64 self.use_se = use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3,", "(since we're not doing classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout =", "layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout =", "nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2)", "self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32,", "modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid()", "= self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.bn2(x) x =", "kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = 
nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64)", "= self.relu(c) h = self.conv2(c) # bs 128 w/16 h/16 h = self.bn2(h)", "else: phi = torch.where(cosine > self.th, phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(),", "!= planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride,", "= resnet50(args) elif args.network == 'r101': self.resnet = resnet101(args) else: # args.network ==", "conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes)", "= x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out =", "self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU()", "x = self.layer3(x) x = self.layer4(x) x = self.bn2(x) x = self.dropout(x) x", "self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return reg, expression,", "*= self.s return output from torchvision import models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel,", "model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return", "= 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 =", "nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers =", "[N, 2048] x = self.fc(x) x = self.sigmoid(x) # [N, 8] return x", "'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with", 
"self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3,", "_, f = self.resnet(images) h = f[3] # bs 2048 w/32 h/32 g", "resnet18(args) elif args.network == 'r34': self.resnet = resnet34(args) elif args.network == 'r50': self.resnet", "* self.sin_m # cos(theta + m) if self.easy_margin: phi = torch.where(cosine > 0,", "= nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def", "self.th = math.cos(math.pi - self.m) self.mm = math.sin(math.pi - self.m) * self.m def", "self.bn0(x) out = self.conv1(out) out = self.bn1(out) out = self.prelu(out) out = self.conv2(out)", "planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes *", "residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out", "h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c = self.relu(c) h", "model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight)", "super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 =", "stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1,", "> self.th, phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(),", "forward(self, input, label): x = F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x, W)", "as F import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import", "self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, 
layers[2], stride=2) self.layer4", "f[2]), 1)) c = self.bn1(c) c = self.relu(c) h = self.conv2(c) # bs", "args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m = args.margin_m self.s = args.margin_s self.cos_m =", "block, layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se = use_se super(ResNet, self).__init__() self.conv1", "align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _, f = self.resnet(images)", "return out class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1)", "self.bn6(h) h = self.relu(h) g = self.conv7(h) # bs 32 w/4 h/4 g", "self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride =", "we're not doing classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout()", "1] x = self.dropout(x) x = self.avgpool(x) x = x.view(-1, 2048) # [N,", "out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 =", "g = (self.unpool1(h)) # bs 2048 w/16 h/16 c = self.conv1(torch.cat((g, f[2]), 1))", "self.use_se: self.se = SEBlock(planes) def forward(self, x): residual = x out = self.bn0(x)", "nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel),", "== 'r152': self.resnet = resnet152(args) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 =", "self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual out", "* cosine) output *= self.s return output from torchvision import models class FrameDetectionModel(nn.Module):", "self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map = 
self.conv10(g) angle_map", "= downsample self.stride = stride self.use_se = use_se if self.use_se: self.se = SEBlock(planes)", "+= residual out = self.relu(out) return out class SEBlock(nn.Module): def __init__(self, channel, reduction=16):", "- self.m) self.mm = math.sin(math.pi - self.m) * self.m def forward(self, input, label):", "nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def forward(self, x): b, c, _,", "self.downsample(x) out += residual out = self.prelu(out) return out class ResNet(nn.Module): def __init__(self,", "f[0]), 1)) c = self.bn5(c) c = self.relu(c) h = self.conv6(c) # bs", "EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if args.network == 'r18': self.resnet = resnet18(args)", "1] x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) x =", "reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet =", "self.sigmoid(geo_map) * 512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map -", "def resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) if", "for i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x):", "224 self.fc = nn.Linear(512 * 14 * 14, 512) self.bn3 = nn.BatchNorm1d(512) for", "self.fc = nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images):", "g = self.unpool2(h) # bs 128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1))", "import device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls =", "self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear layer (since we're not doing classification)", "= nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif 
isinstance(m, nn.BatchNorm2d)", "== 'r50': self.resnet = resnet50(args) elif args.network == 'r101': self.resnet = resnet101(args) else:", "self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3,", "self.resnet = resnet34(args) elif args.network == 'r50': self.resnet = resnet50(args) elif args.network ==", "self.downsample = downsample self.stride = stride self.use_se = use_se if self.use_se: self.se =", "# 224 self.fc = nn.Linear(512 * 14 * 14, 512) self.bn3 = nn.BatchNorm1d(512)", "= self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x =", "512) else: # 224 self.fc = nn.Linear(512 * 14 * 14, 512) self.bn3", "self.unpool2(h) # bs 128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c =", "* math.pi / 2 geo = torch.cat((geo_map, angle_map), 1) # bs 5 w/4", "= conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes,", "use_se if self.use_se: self.se = SEBlock(planes) def forward(self, x): residual = x out", "def forward(self, x): b, c, _, _ = x.size() y = self.avg_pool(x).view(b, c)", "**kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model = ResNet(IRBlock, [3,", "downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x)", "nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 =", "= args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th =", "channel), nn.Sigmoid() ) def forward(self, x): b, c, _, _ = x.size() y", "= nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 =", "self.m) self.mm 
= math.sin(math.pi - self.m) * self.m def forward(self, input, label): x", "2048, 1, 1] x = x.view(-1, 2048) # [N, 2048] x = self.fc(x)", "self.sigmoid(angle_map) angle_map = (angle_map - 0.5) * math.pi / 2 geo = torch.cat((geo_map,", "= x.view(-1, 2048) # [N, 2048] x = self.fc(x) reg = self.sigmoid(x[:, :5])", "= [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for i in", "self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x)", "'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1):", "h/4 h = self.bn6(h) h = self.relu(h) g = self.conv7(h) # bs 32", "'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50':", "= x out = self.bn0(x) out = self.conv1(out) out = self.bn1(out) out =", "padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6", "2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model =", "out = self.relu(out) return out class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__()", "def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear and pool layers", "block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] 
layers.append(block(self.inplanes, planes,", "import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch import nn from", "= nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 =", "return model def resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se,", "x = self.dropout(x) x = x.view(x.size(0), -1) x = self.fc(x) x = self.bn3(x)", "args.network == 'r18': self.resnet = resnet18(args) elif args.network == 'r34': self.resnet = resnet34(args)", "not None: residual = self.downsample(x) out += residual out = self.relu(out) return out", "nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1)", "= self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x =", "if im_size == 112: self.fc = nn.Linear(512 * 7 * 7, 512) else:", "x.view(-1, 2048) # [N, 2048] x = self.fc(x) x = self.sigmoid(x) # [N,", "self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2,", "128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512,", "self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.bn2(x)", "self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7", "nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self, images): x = self.resnet(images) # [N,", "super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear and pool layers (since we're", "use_se=args.use_se, **kwargs) if args.pretrained: 
model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel,", "model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes,", "self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:,", "// reduction, channel), nn.Sigmoid() ) def forward(self, x): b, c, _, _ =", "return model def resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se,", "out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not", "= { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', }", "x = x.view(x.size(0), -1) x = self.fc(x) x = self.bn3(x) return x def", "= models.resnet50(pretrained=True) # Remove linear layer (since we're not doing classification) modules =", "padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None):", "= self.bn4(h) h = self.relu(h) g = self.unpool3(h) # bs 64 w/4 h/4", "self.resnet = resnet152(args) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128,", "layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2)", "self.fc(x) out = self.softmax(x) return out class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__()", "# bs 2048 w/16 h/16 
c = self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c)", "x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out)", "kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64)", "128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c =", "**kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))", "doing classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool =", "= F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x, W) sine = torch.sqrt(1.0 -", "((1.0 - one_hot) * cosine) output *= self.s return output from torchvision import", "nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block,", "self.sigmoid(x[:, :5]) # [N, 8] expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10])", "planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for i in range(1, blocks): layers.append(block(self.inplanes,", "self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU()", "self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block,", "nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 =", "self.conv2(out) out = self.bn2(out) if self.use_se: out = self.se(out) if self.downsample is not", "* 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self,", "self.se = SEBlock(planes) def forward(self, x): residual = x out = 
self.bn0(x) out", "math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi - self.m) self.mm = math.sin(math.pi -", "- torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine * self.sin_m #", "out = self.conv1(out) out = self.bn1(out) out = self.prelu(out) out = self.conv2(out) out", "modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid()", "= self.bn2(x) x = self.dropout(x) x = x.view(x.size(0), -1) x = self.fc(x) x", "out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out", "512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size == 112:", "one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi) + ((1.0 - one_hot)", "reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def forward(self, x): b, c,", "* self.m def forward(self, input, label): x = F.normalize(input) W = F.normalize(self.weight) cosine", "angle_map = self.sigmoid(angle_map) angle_map = (angle_map - 0.5) * math.pi / 2 geo", "+ ((1.0 - one_hot) * cosine) output *= self.s return output from torchvision", "expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 =", "7 * 7, 512) else: # 224 self.fc = nn.Linear(512 * 14 *", "args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8, 36,", "nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _, f = self.resnet(images) h = f[3]", "x = self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x", "ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def", "stride=1, downsample=None): super(BasicBlock, self).__init__() 
self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu", "= list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc =", "self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c = self.relu(c) h = self.conv2(c) #", "self.sin_m = math.sin(self.m) self.th = math.cos(math.pi - self.m) self.mm = math.sin(math.pi - self.m)", "= 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes,", "nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias,", "x = self.layer4(x) x = self.bn2(x) x = self.dropout(x) x = x.view(x.size(0), -1)", "ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model", "glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return reg, expression, gender, glasses,", "= resnet101(args) else: # args.network == 'r152': self.resnet = resnet152(args) self.relu = nn.ReLU()", "def forward(self, input, label): x = F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x,", "im_size=112): self.inplanes = 64 self.use_se = use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64,", "= self.bn1(out) out = self.prelu(out) out = self.conv2(out) out = self.bn2(out) if self.use_se:", "= self.layer3(x) x = self.layer4(x) x = self.bn2(x) x = self.dropout(x) x =", "self.stride = stride self.use_se = use_se if self.use_se: self.se = SEBlock(planes) def forward(self,", "self.softmax(x[:, 13:17]) return reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel,", "= nn.Softmax(dim=-1) def forward(self, images): x = 
self.resnet(images) # [N, 2048, 1, 1]", "7, 512) else: # 224 self.fc = nn.Linear(512 * 14 * 14, 512)", "args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4, 23,", "nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride", "= self.resnet(images) h = f[3] # bs 2048 w/32 h/32 g = (self.unpool1(h))", "nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax =", "self.sigmoid(x) # [N, 8] return x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet", "c) y = self.fc(y).view(b, c, 1, 1) return x * y class IRBlock(nn.Module):", "nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes", "'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution", "= nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) # [N,", "torch import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch import nn", "self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 =", "[N, 2048, 1, 1] x = x.view(-1, 2048) # [N, 2048] x =", "self.bn1(c) c = self.relu(c) h = self.conv2(c) # bs 128 w/16 h/16 h", "_ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1)", "'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 
'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes,", "= f[3] # bs 2048 w/32 h/32 g = (self.unpool1(h)) # bs 2048", "use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs): model = ResNet(IRBlock,", "bs 64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c", "device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = {", "h = self.conv6(c) # bs 32 w/4 h/4 h = self.bn6(h) h =", "import torch import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch import", "self.relu(out) return out class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool =", "= nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear',", "= nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64,", "cosine * self.cos_m - sine * self.sin_m # cos(theta + m) if self.easy_margin:", "align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self,", "self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride =", "self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1,", "args.network == 'r152': self.resnet = resnet152(args) self.relu = 
nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1", "F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine,", "= self.bn6(h) h = self.relu(h) g = self.conv7(h) # bs 32 w/4 h/4", "c = self.bn5(c) c = self.relu(c) h = self.conv6(c) # bs 32 w/4", "# [N, 8] return x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet =", "classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4)", "downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 =", "W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m -", "= self.bn2(out) if self.use_se: out = self.se(out) if self.downsample is not None: residual", "[N, 2048, 1, 1] x = self.dropout(x) x = self.avgpool(x) x = x.view(-1,", "{ 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def", "def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear layer (since we're", "forward(self, images): x = self.resnet(images) # [N, 2048, 1, 1] x = self.dropout(x)", "return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x =", "nn.Sigmoid() def forward(self, images): x = self.resnet(images) # [N, 2048, 1, 1] x", "resnet50(args) elif args.network == 'r101': self.resnet = resnet101(args) else: # args.network == 'r152':", "x): residual = x out = self.bn0(x) out = self.conv1(out) out = self.bn1(out)", "self.bn3(out) 
if self.downsample is not None: residual = self.downsample(x) out += residual out", "kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32,", "expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 =", "resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained:", "downsample = None if stride != 1 or self.inplanes != planes * block.expansion:", "= self.unpool3(h) # bs 64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c", "out = self.softmax(x) return out class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if", "4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x):", "residual out = self.relu(out) return out class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock,", "self.use_se = use_se if self.use_se: self.se = SEBlock(planes) def forward(self, x): residual =", "stride self.use_se = use_se if self.use_se: self.se = SEBlock(planes) def forward(self, x): residual", "planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se =", "stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 =", "super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu", "def resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs) if", "__init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear and pool layers (since", ":5]) # [N, 8] expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses", "= nn.AvgPool2d(kernel_size=4) 
self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self, images): x", "= nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() )", "/ 2 geo = torch.cat((geo_map, angle_map), 1) # bs 5 w/4 w/4 return", "out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 =", "self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual", "self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x)", "torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine * self.sin_m", "= self.bn7(g) g = self.relu(g) score = self.conv8(g) # bs 1 w/4 h/4", "self.th, phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1)", "= (angle_map - 0.5) * math.pi / 2 geo = torch.cat((geo_map, angle_map), 1)", "nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2", "sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine", "= nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128)", "2048) # [N, 2048] x = self.fc(x) out = self.softmax(x) return out class", "isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)", "classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid =", "= x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return", "self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) 
self.bn2 = nn.BatchNorm2d(planes) self.conv3 =", "self.s return output from torchvision import models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__()", "self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images)", "* 7 * 7, 512) else: # 224 self.fc = nn.Linear(512 * 14", "self.easy_margin = args.easy_margin self.m = args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m", "3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs): model =", "# [N, 8] expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses =", "= ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model", "x.view(-1, 2048) # [N, 2048] x = self.fc(x) out = self.softmax(x) return out", "__init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel", "input, label): x = F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x, W) sine", "17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images)", "out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual =", "h = self.relu(h) g = self.unpool2(h) # bs 128 w/8 h/8 c =", "= nn.Linear(512 * 14 * 14, 512) self.bn3 = nn.BatchNorm1d(512) for m in", "nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m = args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m)", "self.bn3(x) return x def resnet18(args, **kwargs): model = ResNet(IRBlock, [2, 2, 2, 2],", "self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), 
nn.PReLU(), nn.Linear(channel", "isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def", "x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear", "output *= self.s return output from torchvision import models class FrameDetectionModel(nn.Module): def __init__(self):", "planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True)", "= resnet18(args) elif args.network == 'r34': self.resnet = resnet34(args) elif args.network == 'r50':", "= stride def forward(self, x): residual = x out = self.conv1(x) out =", "nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) # [N,", "x): b, c, _, _ = x.size() y = self.avg_pool(x).view(b, c) y =", "args.network == 'r50': self.resnet = resnet50(args) elif args.network == 'r101': self.resnet = resnet101(args)", "**kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model = ResNet(IRBlock, [3,", "**kwargs): model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))", "'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',", "self.dropout(x) x = self.avgpool(x) x = x.view(-1, 2048) # [N, 2048] x =", "h/16 h = self.bn2(h) h = self.relu(h) g = self.unpool2(h) # bs 128", "kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample", 
"nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes *", "* block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes =", "block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes", "= self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion", "x = self.resnet(images) # [N, 2048, 1, 1] x = x.view(-1, 2048) #", "x = self.sigmoid(x) # [N, 8] return x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel,", "= self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block,", "resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained:", "nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes", "model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))", "args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi - self.m) self.mm", "nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64,", "stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes,", "mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def", "resnet152(args) self.relu = nn.ReLU() self.sigmoid = 
nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1", "= self.relu(c) h = self.conv6(c) # bs 32 w/4 h/4 h = self.bn6(h)", "kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 =", "self.downsample is not None: residual = self.downsample(x) out += residual out = self.prelu(out)", "self.downsample = downsample self.stride = stride def forward(self, x): residual = x out", "race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear", "out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2", "= None if stride != 1 or self.inplanes != planes * block.expansion: downsample", "= self.bn2(h) h = self.relu(h) g = self.unpool2(h) # bs 128 w/8 h/8", "self.m) * self.m def forward(self, input, label): x = F.normalize(input) W = F.normalize(self.weight)", "from torchvision import models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True)", "blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes", "= nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _, f = self.resnet(images) h =", "'r50': self.resnet = resnet50(args) elif args.network == 'r101': self.resnet = resnet101(args) else: #", "bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride", "torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine * self.sin_m # cos(theta", "(angle_map - 0.5) * math.pi / 2 geo = torch.cat((geo_map, angle_map), 1) #", "out = self.conv3(out) out = 
self.bn3(out) if self.downsample is not None: residual =", "nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self,", "2048) # [N, 2048] x = self.fc(x) reg = self.sigmoid(x[:, :5]) # [N,", "= self.relu(h) g = self.unpool3(h) # bs 64 w/4 h/4 c = self.conv5(torch.cat((g,", "8] expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13])", "model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return", "= use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 =", "nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None", "cosine) output *= self.s return output from torchvision import models class FrameDetectionModel(nn.Module): def", "= nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _,", "resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if", "= self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual", "nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320,", "'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 
'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes,", "kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes,", "nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def forward(self, x): b, c, _, _", "self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual", "= nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1)", "8) self.sigmoid = nn.Sigmoid() def forward(self, images): x = self.resnet(images) # [N, 2048,", "out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)", "= nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2", "self.resnet(images) # [N, 2048, 1, 1] x = self.dropout(x) x = self.avgpool(x) x", "1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes,", "= self.fc(y).view(b, c, 1, 1) return x * y class IRBlock(nn.Module): expansion =", "stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size == 112: self.fc =", "SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential(", "kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 =", "Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1", "= self.sigmoid(x) # [N, 8] return x class FaceAttributeModel(nn.Module): def __init__(self): 
super(FaceAttributeModel, self).__init__()", "out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out +=", "planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1,", "* 14, 512) self.bn3 = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d):", "[N, 8] return x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True)", "glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove", "model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3],", "output = (one_hot * phi) + ((1.0 - one_hot) * cosine) output *=", "4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args,", "models.resnet50(pretrained=True) # Remove linear and pool layers (since we're not doing classification) modules", "h = self.relu(h) g = self.unpool3(h) # bs 64 w/4 h/4 c =", "2048] x = self.fc(x) reg = self.sigmoid(x[:, :5]) # [N, 8] expression =", "layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3],", "= self.conv6(c) # bs 32 w/4 h/4 h = self.bn6(h) h = self.relu(h)", "def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.prelu(x) x", "elif args.network == 'r50': self.resnet = resnet50(args) elif args.network == 'r101': self.resnet =", "= self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x =", "self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map - 0.5) * math.pi / 2", "__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 
'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',", "= nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64,", "x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out)", "one_hot) * cosine) output *= self.s return output from torchvision import models class", "= nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3,", "super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear layer (since we're not doing", "planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),", "W = F.normalize(self.weight) cosine = F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2))", "1)) c = self.bn5(c) c = self.relu(c) h = self.conv6(c) # bs 32", "IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__()", "x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x", "torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine", "return x def resnet18(args, **kwargs): model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se,", "class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if args.network == 'r18': self.resnet =", "self.conv1(x) x = self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x = self.layer1(x)", "self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 =", "use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64)", "label.view(-1, 
1).long(), 1) output = (one_hot * phi) + ((1.0 - one_hot) *", "= x.view(-1, 2048) # [N, 2048] x = self.fc(x) x = self.sigmoid(x) #", "self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes,", "super(EastModel, self).__init__() if args.network == 'r18': self.resnet = resnet18(args) elif args.network == 'r34':", "F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m", "= nn.Dropout() if im_size == 112: self.fc = nn.Linear(512 * 7 * 7,", "= self.relu(g) score = self.conv8(g) # bs 1 w/4 h/4 score = self.sigmoid(score)", "def resnet18(args, **kwargs): model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs) if", "x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.bn2(x) x", "model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return", "nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x", "class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__()", "nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self, images): x =", "if self.use_se: self.se = SEBlock(planes) def forward(self, x): residual = x out =", "**kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs): model = ResNet(IRBlock, [3,", "pool layers (since we're not doing classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules)", "= nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes", "* y class IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None,", "ResNet(IRBlock, 
[3, 8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class", "= math.cos(math.pi - self.m) self.mm = math.sin(math.pi - self.m) * self.m def forward(self,", "channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def forward(self, x):", "[3, 4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args,", "<filename>models.py import math import torch import torch.nn.functional as F import torch.utils.model_zoo as model_zoo", "nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers", "classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid =", "= use_se if self.use_se: self.se = SEBlock(planes) def forward(self, x): residual = x", "nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _, f", "self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block,", "= self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return x * y", "nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4", "= x.view(-1, 2048) # [N, 2048] x = self.fc(x) out = self.softmax(x) return", "self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3", "range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def 
forward(self, x): x = self.conv1(x)", "w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c = self.relu(c)", "4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample =", "if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8,", "2048, 1, 1] x = self.dropout(x) x = self.avgpool(x) x = x.view(-1, 2048)", "= self.softmax(x) return out class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if args.network", "h/8 h = self.bn4(h) h = self.relu(h) g = self.unpool3(h) # bs 64", "# Remove linear and pool layers (since we're not doing classification) modules =", "self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self, images): x = self.resnet(images)", "== 112: self.fc = nn.Linear(512 * 7 * 7, 512) else: # 224", "isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample =", "= self.sigmoid(x[:, :5]) # [N, 8] expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:,", "out class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc", "torch.where(cosine > self.th, phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1,", "self.fc = nn.Linear(512 * 14 * 14, 512) self.bn3 = nn.BatchNorm1d(512) for m", "conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride)", "padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4", "x = self.fc(x) x = self.sigmoid(x) # [N, 8] return x class FaceAttributeModel(nn.Module):", "= nn.Sigmoid() 
self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128,", "planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes !=", "nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def forward(self,", "- 0.5) * math.pi / 2 geo = torch.cat((geo_map, angle_map), 1) # bs", "# bs 64 w/8 h/8 h = self.bn4(h) h = self.relu(h) g =", "bs 128 w/16 h/16 h = self.bn2(h) h = self.relu(h) g = self.unpool2(h)", "torch import nn from torch.nn import Parameter from config import device, num_classes __all__", "nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3", "self.conv8(g) # bs 1 w/4 h/4 score = self.sigmoid(score) geo_map = self.conv9(g) geo_map", "self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 =", "# [N, 2048, 1, 1] x = self.dropout(x) x = self.avgpool(x) x =", "= self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map - 0.5) * math.pi /", "= self.softmax(x[:, 13:17]) return reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self):", "= nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1)", "bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2", "# bs 32 w/4 h/4 g = self.bn7(g) g = self.relu(g) score =", "padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1", "= list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid 
= nn.Sigmoid() self.softmax", "from config import device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']", "w/16 h/16 h = self.bn2(h) h = self.relu(h) g = self.unpool2(h) # bs", "kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride,", "out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes,", "self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid()", "= self.conv4(c) # bs 64 w/8 h/8 h = self.bn4(h) h = self.relu(h)", "y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return x *", "cosine) else: phi = torch.where(cosine > self.th, phi, cosine - self.mm) one_hot =", "= self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out =", "= self.unpool2(h) # bs 128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c", "64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2,", "args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi", "self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size ==", "or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion,", "c = self.bn3(c) c = self.relu(c) h = self.conv4(c) # bs 64 w/8", "self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 =", "self.bn2(x) x = self.dropout(x) x = x.view(x.size(0), -1) x = self.fc(x) x =", "class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__()", "= 
torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine * self.cos_m - sine *", "model def resnet34(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs)", "= nn.Linear(512 * 7 * 7, 512) else: # 224 self.fc = nn.Linear(512", "nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640,", "padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)", "nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se = use_se if self.use_se: self.se", "h = self.relu(h) g = self.conv7(h) # bs 32 w/4 h/4 g =", "= self.relu(out) return out class SEBlock(nn.Module): def __init__(self, channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool", "4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args, **kwargs):", "padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32)", "h = self.bn2(h) h = self.relu(h) g = self.unpool2(h) # bs 128 w/8", "self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out)", "None if stride != 1 or self.inplanes != planes * block.expansion: downsample =", "device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi) + ((1.0 -", "nn.Linear(512 * 7 * 7, 512) else: # 224 self.fc = nn.Linear(512 *", "self.m = args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th", "= self.prelu(out) out = self.conv2(out) out = self.bn2(out) if self.use_se: out = self.se(out)", "def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, 
stride)", "model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',", "self.resnet(images) # [N, 2048, 1, 1] x = x.view(-1, 2048) # [N, 2048]", "- self.m) * self.m def forward(self, input, label): x = F.normalize(input) W =", "= self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return reg, expression, gender, glasses, race", "out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out", "planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x):", "x = self.dropout(x) x = self.avgpool(x) x = x.view(-1, 2048) # [N, 2048]", "self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu =", "= ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model", "* 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample", "32 w/4 h/4 h = self.bn6(h) h = self.relu(h) g = self.conv7(h) #", "self.softmax(x) return out class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if args.network ==", "gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17]) return", "h/16 c = self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c = self.relu(c) h", "self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is", "= nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = 
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)", "residual = x out = self.bn0(x) out = self.conv1(out) out = self.bn1(out) out", "1, 1) return x * y class IRBlock(nn.Module): expansion = 1 def __init__(self,", "4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes,", "= self.conv8(g) # bs 1 w/4 h/4 score = self.sigmoid(score) geo_map = self.conv9(g)", "nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self,", "= F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine *", "out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 =", "FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear and pool", "one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi)", "== 'r101': self.resnet = resnet101(args) else: # args.network == 'r152': self.resnet = resnet152(args)", "bs 64 w/8 h/8 h = self.bn4(h) h = self.relu(h) g = self.unpool3(h)", "im_size == 112: self.fc = nn.Linear(512 * 7 * 7, 512) else: #", "if self.use_se: out = self.se(out) if self.downsample is not None: residual = self.downsample(x)", "'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3", "c = self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c = self.relu(c) h =", "self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) 
self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample", "nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5", "gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True) #", "self.relu(h) g = self.conv7(h) # bs 32 w/4 h/4 g = self.bn7(g) g", "self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4)", "import models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove", "(since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc =", "14, 512) self.bn3 = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight)", "nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample =", "5:8]) gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race = self.softmax(x[:, 13:17])", "g = self.relu(g) score = self.conv8(g) # bs 1 w/4 h/4 score =", "torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import Parameter from config", "out = self.bn2(out) if self.use_se: out = self.se(out) if self.downsample is not None:", "self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1)", "Remove linear and pool layers (since we're not doing classification) modules = list(resnet.children())[:-2]", "= self.fc(x) x = self.sigmoid(x) # [N, 8] return x class FaceAttributeModel(nn.Module): def", "out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4", "self.relu(out) out 
= self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out)", "nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)", "2048] x = self.fc(x) out = self.softmax(x) return out class EastModel(nn.Module): def __init__(self,", "FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear and pool", "= nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample", "self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self, images):", "= self.resnet(images) # [N, 2048, 1, 1] x = x.view(-1, 2048) # [N,", "# [N, 2048] x = self.fc(x) out = self.softmax(x) return out class EastModel(nn.Module):", "self.bn4(h) h = self.relu(h) g = self.unpool3(h) # bs 64 w/4 h/4 c", "512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map - 0.5) *", "= nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride", "super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(),", "list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self,", "x = self.bn2(x) x = self.dropout(x) x = x.view(x.size(0), -1) x = self.fc(x)", "= nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu", "forward(self, x): b, c, _, _ = x.size() y = self.avg_pool(x).view(b, c) y", "= self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size", "= 
nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax", "= nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) #", "self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64,", "self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128,", "1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes)", "angle_map = (angle_map - 0.5) * math.pi / 2 geo = torch.cat((geo_map, angle_map),", "self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot *", "1, 1] x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) reg", "cosine = F.linear(x, W) sine = torch.sqrt(1.0 - torch.pow(cosine, 2)) phi = cosine", "= self.resnet(images) # [N, 2048, 1, 1] x = self.dropout(x) x = self.avgpool(x)", "self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5", "= self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out =", "= nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,", "self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.bn2(x) x = self.dropout(x)", "kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool2 =", "self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2", "y class 
IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):", "stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1", "h/4 g = self.bn7(g) g = self.relu(g) score = self.conv8(g) # bs 1", "self.conv4(c) # bs 64 w/8 h/8 h = self.bn4(h) h = self.relu(h) g", "doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid", "downsample self.stride = stride self.use_se = use_se if self.use_se: self.se = SEBlock(planes) def", "= nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 =", "else: # args.network == 'r152': self.resnet = resnet152(args) self.relu = nn.ReLU() self.sigmoid =", "h = self.bn6(h) h = self.relu(h) g = self.conv7(h) # bs 32 w/4", "= (self.unpool1(h)) # bs 2048 w/16 h/16 c = self.conv1(torch.cat((g, f[2]), 1)) c", "kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)", "resnet101(args) else: # args.network == 'r152': self.resnet = resnet152(args) self.relu = nn.ReLU() self.sigmoid", "inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1", "return out class EastModel(nn.Module): def __init__(self, args): super(EastModel, self).__init__() if args.network == 'r18':", "c, _, _ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c,", "mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _, f =", "self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c = self.relu(c) h = self.conv4(c) #", "h = self.conv4(c) # bs 64 w/8 h/8 h = 
self.bn4(h) h =", "else: # 224 self.fc = nn.Linear(512 * 14 * 14, 512) self.bn3 =", "**kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model = ResNet(IRBlock, [3,", "self).__init__() if args.network == 'r18': self.resnet = resnet18(args) elif args.network == 'r34': self.resnet", "= ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34':", "w/8 h/8 h = self.bn4(h) h = self.relu(h) g = self.unpool3(h) # bs", "return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None):", "not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8)", "self).__init__() resnet = models.resnet101(pretrained=True) # Remove linear and pool layers (since we're not", "x * y class IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1,", "images): x = self.resnet(images) # [N, 2048, 1, 1] x = x.view(-1, 2048)", "nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) # [N, 2048, 1, 1] x", "m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):", "= nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images): x", "nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128,", "def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1", "not doing classification) modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() 
self.avgpool", "self.bn3(c) c = self.relu(c) h = self.conv4(c) # bs 64 w/8 h/8 h", "= stride self.use_se = use_se if self.use_se: self.se = SEBlock(planes) def forward(self, x):", "kernel_size=1) self.bn1 = nn.BatchNorm2d(128) self.conv2 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128)", "= SEBlock(planes) def forward(self, x): residual = x out = self.bn0(x) out =", ") def forward(self, x): b, c, _, _ = x.size() y = self.avg_pool(x).view(b,", "use_se=self.use_se)) self.inplanes = planes for i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return", "m) if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi =", "list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048,", "c, 1, 1) return x * y class IRBlock(nn.Module): expansion = 1 def", "forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.prelu(x) x =", "} def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3,", "self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out", "= args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi - self.m)", "x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) out = self.softmax(x)", "self.resnet = resnet101(args) else: # args.network == 'r152': self.resnet = resnet152(args) self.relu =", "not None: residual = self.downsample(x) out += residual out = self.prelu(out) return out", "nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1)", "= self.avgpool(x) x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) out", "ArcMarginModel(nn.Module): def __init__(self, args): 
super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin =", "bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3", "inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2", "1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes,", "x.view(-1, 2048) # [N, 2048] x = self.fc(x) reg = self.sigmoid(x[:, :5]) #", "residual = self.downsample(x) out += residual out = self.relu(out) return out class SEBlock(nn.Module):", "F import torch.utils.model_zoo as model_zoo from torch import nn from torch.nn import Parameter", "def forward(self, images): x = self.resnet(images) # [N, 2048, 1, 1] x =", "out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes,", "x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1, 1) return x", "args.network == 'r101': self.resnet = resnet101(args) else: # args.network == 'r152': self.resnet =", "= self.se(out) if self.downsample is not None: residual = self.downsample(x) out += residual", "1).long(), 1) output = (one_hot * phi) + ((1.0 - one_hot) * cosine)", "bs 2048 w/32 h/32 g = (self.unpool1(h)) # bs 2048 w/16 h/16 c", "nn.Linear(512 * 14 * 14, 512) self.bn3 = nn.BatchNorm1d(512) for m in self.modules():", "= models.resnet101(pretrained=True) # Remove linear and pool layers (since we're not doing classification)", "= nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3 = nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1)", "planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, 
planes) self.bn2", "64 self.use_se = use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False)", "linear layer (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules)", "stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout()", "nn.Dropout() self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self,", "= self.bn5(c) c = self.relu(c) h = self.conv6(c) # bs 32 w/4 h/4", "2 geo = torch.cat((geo_map, angle_map), 1) # bs 5 w/4 w/4 return score,", "return out class ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes = 64", "i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x", "\"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module):", "output from torchvision import models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet =", ") layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for", "= self.bn1(c) c = self.relu(c) h = self.conv2(c) # bs 128 w/16 h/16", "kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 =", "w/4 h/4 h = self.bn6(h) h = self.relu(h) g = self.conv7(h) # bs", "= list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def", "stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = 
nn.BatchNorm2d(planes)", "c = self.relu(c) h = self.conv2(c) # bs 128 w/16 h/16 h =", "config import device, num_classes __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls", "if self.downsample is not None: residual = self.downsample(x) out += residual out =", "1)) c = self.bn1(c) c = self.relu(c) h = self.conv2(c) # bs 128", "super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m = args.margin_m", "2048) # [N, 2048] x = self.fc(x) x = self.sigmoid(x) # [N, 8]", "2048] x = self.fc(x) x = self.sigmoid(x) # [N, 8] return x class", "(self.unpool1(h)) # bs 2048 w/16 h/16 c = self.conv1(torch.cat((g, f[2]), 1)) c =", "x = self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x = self.layer1(x) x", "= nn.BatchNorm2d(inplanes) self.prelu = nn.PReLU() self.conv2 = conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes)", "model_zoo from torch import nn from torch.nn import Parameter from config import device,", "[3, 4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(args,", "c = self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c = self.relu(c) h =", "model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return", "h/32 g = (self.unpool1(h)) # bs 2048 w/16 h/16 c = self.conv1(torch.cat((g, f[2]),", "128 w/16 h/16 h = self.bn2(h) h = self.relu(h) g = self.unpool2(h) #", "* block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes", "forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out =", "3], use_se=args.use_se, **kwargs) if args.pretrained: 
model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model =", "resnet = models.resnet101(pretrained=True) # Remove linear and pool layers (since we're not doing", "block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes *", "def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se = use_se super(ResNet,", "h/4 score = self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map", "phi = torch.where(cosine > self.th, phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device)", "self.conv1(out) out = self.bn1(out) out = self.prelu(out) out = self.conv2(out) out = self.bn2(out)", "[N, 8] expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:,", "resnet34(args) elif args.network == 'r50': self.resnet = resnet50(args) elif args.network == 'r101': self.resnet", "+= residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def", "return model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size))", "= torch.where(cosine > 0, phi, cosine) else: phi = torch.where(cosine > self.th, phi,", "stride=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1", "= nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1)", "class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear layer", "[N, 2048] x = self.fc(x) out = self.softmax(x) return out class EastModel(nn.Module): 
def", "self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1) self.bn1 =", "self.resnet = resnet18(args) elif args.network == 'r34': self.resnet = resnet34(args) elif args.network ==", "self.fc(y).view(b, c, 1, 1) return x * y class IRBlock(nn.Module): expansion = 1", "bs 2048 w/16 h/16 c = self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c", "forward(self, x): residual = x out = self.bn0(x) out = self.conv1(out) out =", "= self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None:", "* 7, 512) else: # 224 self.fc = nn.Linear(512 * 14 * 14,", "math import torch import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from torch", "[] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for i in range(1,", "= self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out =", "= nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes)", "nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1) self.bn2 = nn.BatchNorm2d(128) self.conv3 = nn.Conv2d(in_channels=640, out_channels=64, kernel_size=1) self.bn3", "reg = self.sigmoid(x[:, :5]) # [N, 8] expression = self.softmax(x[:, 5:8]) gender =", "0, phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine - self.mm)", "kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32)", "h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c = self.relu(c) h", "out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if", "// reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def forward(self, x): b,", "self.inplanes = 64 self.use_se = use_se super(ResNet, 
self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3,", "expression = self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race", "= nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10", "64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c =", "use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes)", "nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.prelu(x)", "list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax =", "# [N, 2048] x = self.fc(x) x = self.sigmoid(x) # [N, 8] return", "h = self.bn4(h) h = self.relu(h) g = self.unpool3(h) # bs 64 w/4", "- self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot", "mode='bilinear', align_corners=False) def forward(self, images): _, f = self.resnet(images) h = f[3] #", "math.sin(self.m) self.th = math.cos(math.pi - self.m) self.mm = math.sin(math.pi - self.m) * self.m", "expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0", "self.cos_m - sine * self.sin_m # cos(theta + m) if self.easy_margin: phi =", "2, 2, 2], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(args, **kwargs):", "ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def", "modules = list(resnet.children())[:-2] self.resnet = nn.Sequential(*modules) self.dropout = nn.Dropout() self.avgpool = 
nn.AvgPool2d(kernel_size=4) self.fc", "x def resnet18(args, **kwargs): model = ResNet(IRBlock, [2, 2, 2, 2], use_se=args.use_se, **kwargs)", "= nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1,", "images): x = self.resnet(images) # [N, 2048, 1, 1] x = self.dropout(x) x", "= planes for i in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def", "self.fc = nn.Linear(512 * 7 * 7, 512) else: # 224 self.fc =", "math.sin(math.pi - self.m) * self.m def forward(self, input, label): x = F.normalize(input) W", "10:13]) race = self.softmax(x[:, 13:17]) return reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module):", "= nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction,", "512) self.bn3 = nn.BatchNorm1d(512) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif", "= self.softmax(x[:, 5:8]) gender = self.softmax(x[:, 8:10]) glasses = self.softmax(x[:, 10:13]) race =", "class ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se =", "self.m def forward(self, input, label): x = F.normalize(input) W = F.normalize(self.weight) cosine =", "= nn.Sequential(*modules) self.fc = nn.Linear(2048, 8) self.sigmoid = nn.Sigmoid() def forward(self, images): x", "args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6,", "* 512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map - 0.5)", "* self.cos_m - sine * self.sin_m # cos(theta + m) if self.easy_margin: phi", "nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, 
stride=1): downsample = None if", "in range(1, blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x =", "super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu", "channel, reduction=16): super(SEBlock, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel //", "out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out +=", "self.sigmoid = nn.Sigmoid() def forward(self, images): x = self.resnet(images) # [N, 2048, 1,", "out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out", "'r18': self.resnet = resnet18(args) elif args.network == 'r34': self.resnet = resnet34(args) elif args.network", "nn from torch.nn import Parameter from config import device, num_classes __all__ = ['ResNet',", "self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out", "elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample", "self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes, inplanes) self.bn1 = nn.BatchNorm2d(inplanes) self.prelu =", "# [N, 2048, 1, 1] x = x.view(-1, 2048) # [N, 2048] x", "cos(theta + m) if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else:", "geo_map = self.sigmoid(geo_map) * 512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map =", "= ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model", "self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual", "stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) 
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3", "bs 128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c", "use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model =", "- one_hot) * cosine) output *= self.s return output from torchvision import models", "self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images): _, f = self.resnet(images) h", "self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out)", "args.easy_margin self.m = args.margin_m self.s = args.margin_s self.cos_m = math.cos(self.m) self.sin_m = math.sin(self.m)", "self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is", "c = self.relu(c) h = self.conv6(c) # bs 32 w/4 h/4 h =", "x = self.avgpool(x) x = x.view(-1, 2048) # [N, 2048] x = self.fc(x)", "self.avgpool = nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self, images):", "[3, 8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module):", "args): super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m =", "= (one_hot * phi) + ((1.0 - one_hot) * cosine) output *= self.s", "score = self.sigmoid(score) geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map =", "self.relu(c) h = self.conv4(c) # bs 64 w/8 h/8 h = self.bn4(h) h", "self.bn5(c) c = self.relu(c) h = self.conv6(c) # bs 32 w/4 h/4 h", "model def resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size,", "w/16 h/16 c = 
self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c = self.relu(c)", "self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion =", "= nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1],", "planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x)", "None: residual = self.downsample(x) out += residual out = self.prelu(out) return out class", "= self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map)", "self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) # [N, 2048, 1,", "out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not", "# Remove linear layer (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet", "__init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)", "linear and pool layers (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet", "we're not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048,", "self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512) self.dropout = nn.Dropout() if", "out = self.bn0(x) out = self.conv1(out) out = self.bn1(out) out = self.prelu(out) out", "self.use_se = use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False) self.bn1", "nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m,", "return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 
padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def", "self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.bn2 = nn.BatchNorm2d(512)", "def __init__(self, args): super(EastModel, self).__init__() if args.network == 'r18': self.resnet = resnet18(args) elif", "Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m = args.margin_m self.s = args.margin_s self.cos_m", "nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32,", "nn.Sequential(*modules) self.fc = nn.Linear(2048, 17) self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self,", "= resnet34(args) elif args.network == 'r50': self.resnet = resnet50(args) elif args.network == 'r101':", "# bs 2048 w/32 h/32 g = (self.unpool1(h)) # bs 2048 w/16 h/16", "out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 =", "c = self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c) c = self.relu(c) h =", "h = f[3] # bs 2048 w/32 h/32 g = (self.unpool1(h)) # bs", "self.downsample(x) out += residual out = self.relu(out) return out class SEBlock(nn.Module): def __init__(self,", "= self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c = self.relu(c) h = self.conv2(c)", "self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes,", "nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 = self._make_layer(block, 64, layers[0])", "= self.relu(c) h = self.conv4(c) # bs 64 w/8 h/8 h = self.bn4(h)", "super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) 
self.relu = nn.ReLU(inplace=True)", "out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck,", "self.bn7(g) g = self.relu(g) score = self.conv8(g) # bs 1 w/4 h/4 score", "self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map", "= nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se = use_se if self.use_se:", "import math import torch import torch.nn.functional as F import torch.utils.model_zoo as model_zoo from", "not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc = nn.Linear(2048, 17)", "def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin", "elif args.network == 'r101': self.resnet = resnet101(args) else: # args.network == 'r152': self.resnet", "bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))", "return output from torchvision import models class FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet", "self.relu(c) h = self.conv6(c) # bs 32 w/4 h/4 h = self.bn6(h) h", "nn.AvgPool2d(kernel_size=4) self.fc = nn.Linear(2048, 7) self.softmax = nn.Softmax(dim=-1) def forward(self, images): x =", "images): _, f = self.resnet(images) h = f[3] # bs 2048 w/32 h/32", "* block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes,", "= math.cos(self.m) self.sin_m = math.sin(self.m) self.th = math.cos(math.pi - self.m) self.mm = math.sin(math.pi", "# bs 64 w/4 h/4 c = self.conv5(torch.cat((g, f[0]), 1)) c = self.bn5(c)", "out = self.bn3(out) if 
self.downsample is not None: residual = self.downsample(x) out +=", "sine * self.sin_m # cos(theta + m) if self.easy_margin: phi = torch.where(cosine >", "models.resnet101(pretrained=True) # Remove linear and pool layers (since we're not doing classification) modules", "math.cos(math.pi - self.m) self.mm = math.sin(math.pi - self.m) * self.m def forward(self, input,", "align_corners=False) def forward(self, images): _, f = self.resnet(images) h = f[3] # bs", "nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks,", "self.prelu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes =", "self.layer3(x) x = self.layer4(x) x = self.bn2(x) x = self.dropout(x) x = x.view(x.size(0),", "c = self.bn1(c) c = self.relu(c) h = self.conv2(c) # bs 128 w/16", "self.relu(h) g = self.unpool2(h) # bs 128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]),", "= nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual =", "geo = torch.cat((geo_map, angle_map), 1) # bs 5 w/4 w/4 return score, geo", "= self.layer4(x) x = self.bn2(x) x = self.dropout(x) x = x.view(x.size(0), -1) x", "= self.conv1(x) x = self.bn1(x) x = self.prelu(x) x = self.maxpool(x) x =", "layers = [] layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) self.inplanes = planes for i", "convolution with padding\"\"\" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion", "'r34': self.resnet = resnet34(args) elif args.network == 'r50': self.resnet = resnet50(args) elif args.network", "out_channels=32, kernel_size=3, padding=1) self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7", "= self.bn0(x) out = self.conv1(out) out = self.bn1(out) 
out = self.prelu(out) out =", "blocks): layers.append(block(self.inplanes, planes, use_se=self.use_se)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x", "planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 =", "'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): \"\"\"3x3 convolution with padding\"\"\" return nn.Conv2d(in_planes,", "nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) # [N, 2048,", "self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes)", "= self.sigmoid(geo_map) * 512 angle_map = self.conv10(g) angle_map = self.sigmoid(angle_map) angle_map = (angle_map", "return model def resnet152(args, **kwargs): model = ResNet(IRBlock, [3, 8, 36, 3], use_se=args.use_se,", "and pool layers (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet =", "= torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi) +", "0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1):", "= self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual", "nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.conv9", "layer (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules) self.fc", "residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self,", "self.prelu(out) out = self.conv2(out) out = self.bn2(out) if self.use_se: out = 
self.se(out) if", "resnet = models.resnet50(pretrained=True) # Remove linear and pool layers (since we're not doing", "32 w/4 h/4 g = self.bn7(g) g = self.relu(g) score = self.conv8(g) #", "1, 1] x = self.dropout(x) x = self.avgpool(x) x = x.view(-1, 2048) #", "= conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride", "= self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample", "isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight)", "= nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample", "score = self.conv8(g) # bs 1 w/4 h/4 score = self.sigmoid(score) geo_map =", "im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs): model = ResNet(IRBlock,", "or isinstance(m, nn.BatchNorm1d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.xavier_normal_(m.weight) nn.init.constant_(m.bias, 0)", "= ResNet(IRBlock, [3, 4, 6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return", "FrameDetectionModel(nn.Module): def __init__(self): super(FrameDetectionModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear layer (since", "self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se = use_se if", "pool layers (since we're not doing classification) modules = list(resnet.children())[:-1] self.resnet = nn.Sequential(*modules)", "geo_map = self.conv9(g) geo_map = self.sigmoid(geo_map) * 512 angle_map = self.conv10(g) angle_map =", "f[1]), 1)) c = 
self.bn3(c) c = self.relu(c) h = self.conv4(c) # bs", "def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear and pool layers", "= self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x)", "x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) reg = self.sigmoid(x[:,", "planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes)", "self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out)", "math.pi / 2 geo = torch.cat((geo_map, angle_map), 1) # bs 5 w/4 w/4", "conv3x3(inplanes, planes, stride) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.use_se", "w/32 h/32 g = (self.unpool1(h)) # bs 2048 w/16 h/16 c = self.conv1(torch.cat((g,", "= 64 self.use_se = use_se super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1,", "x = self.fc(x) out = self.softmax(x) return out class EastModel(nn.Module): def __init__(self, args):", "1)) c = self.bn3(c) c = self.relu(c) h = self.conv4(c) # bs 64", "label): x = F.normalize(input) W = F.normalize(self.weight) cosine = F.linear(x, W) sine =", "# args.network == 'r152': self.resnet = resnet152(args) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid()", "= self.conv2(c) # bs 128 w/16 h/16 h = self.bn2(h) h = self.relu(h)", "self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel //", "phi, cosine) else: phi = torch.where(cosine > self.th, phi, cosine - self.mm) one_hot", "= self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None:", "out class ResNet(nn.Module): def __init__(self, block, layers, use_se=True, im_size=112): self.inplanes = 64 self.use_se", "def resnet50(args, **kwargs): model = ResNet(IRBlock, [3, 4, 6, 3], 
use_se=args.use_se, im_size=args.im_size, **kwargs)", "from torch.nn import Parameter from config import device, num_classes __all__ = ['ResNet', 'resnet18',", "linear and pool layers (since we're not doing classification) modules = list(resnet.children())[:-2] self.resnet", "- sine * self.sin_m # cos(theta + m) if self.easy_margin: phi = torch.where(cosine", "import nn from torch.nn import Parameter from config import device, num_classes __all__ =", "class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) # Remove linear and", "downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion),", "= conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def", "self.sigmoid = nn.Sigmoid() self.softmax = nn.Softmax(dim=-1) def forward(self, images): x = self.resnet(images) #", "= resnet152(args) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.conv1 = nn.Conv2d(in_channels=3072, out_channels=128, kernel_size=1)", "and pool layers (since we're not doing classification) modules = list(resnet.children())[:-2] self.resnet =", "+ m) if self.easy_margin: phi = torch.where(cosine > 0, phi, cosine) else: phi", "cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output =", "bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock,", "nn.Conv2d(in_channels=320, out_channels=64, kernel_size=1) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=1) self.bn6", "= self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c) c = self.relu(c) h = self.conv4(c)", "self.conv6(c) # bs 32 w/4 h/4 h = self.bn6(h) h = self.relu(h) g", "= 
self.sigmoid(angle_map) angle_map = (angle_map - 0.5) * math.pi / 2 geo =", "nn.Sequential( nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), nn.Sigmoid() ) def", "3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self, args):", "self).__init__() self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m = args.margin_m self.s", "* 14 * 14, 512) self.bn3 = nn.BatchNorm1d(512) for m in self.modules(): if", "= self.conv2(out) out = self.bn2(out) if self.use_se: out = self.se(out) if self.downsample is", "= nn.BatchNorm2d(64) self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1) self.bn4 = nn.BatchNorm2d(64) self.conv5 =", "planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = []", "b, c, _, _ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b,", "def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1,", "phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output", "expression, gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__() resnet = models.resnet101(pretrained=True)", "= nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size == 112: self.fc = nn.Linear(512 *", "block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes", "inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock, self).__init__() self.bn0 = nn.BatchNorm2d(inplanes) self.conv1 = conv3x3(inplanes,", "= conv3x3(inplanes, planes, stride) self.bn1 
= nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes,", "_, _ = x.size() y = self.avg_pool(x).view(b, c) y = self.fc(y).view(b, c, 1,", "args.network == 'r34': self.resnet = resnet34(args) elif args.network == 'r50': self.resnet = resnet50(args)", "def resnet101(args, **kwargs): model = ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) if", "8, 36, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def", "self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = args.easy_margin self.m = args.margin_m self.s =", "self.conv9 = nn.Conv2d(in_channels=32, out_channels=4, kernel_size=1) self.conv10 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1) self.unpool1 = nn.Upsample(scale_factor=2,", "'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',", "= self.downsample(x) out += residual out = self.relu(out) return out class SEBlock(nn.Module): def", "residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module):", "self.conv2(c) # bs 128 w/16 h/16 h = self.bn2(h) h = self.relu(h) g", "x = self.bn3(x) return x def resnet18(args, **kwargs): model = ResNet(IRBlock, [2, 2,", "# bs 1 w/4 h/4 score = self.sigmoid(score) geo_map = self.conv9(g) geo_map =", "8] return x class FaceAttributeModel(nn.Module): def __init__(self): super(FaceAttributeModel, self).__init__() resnet = models.resnet50(pretrained=True) #", "= Parameter(torch.FloatTensor(num_classes, args.emb_size)) nn.init.xavier_uniform_(self.weight) self.easy_margin = 
args.easy_margin self.m = args.margin_m self.s = args.margin_s", "nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride =", "self.avgpool(x) x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) out =", "self.bn6 = nn.BatchNorm2d(32) self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8", "BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1", "self.conv7 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1) self.bn7 = nn.BatchNorm2d(32) self.conv8 = nn.Conv2d(in_channels=32, out_channels=1,", "self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out", "1, 1] x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) x", "= self.dropout(x) x = x.view(x.size(0), -1) x = self.fc(x) x = self.bn3(x) return", "def forward(self, images): _, f = self.resnet(images) h = f[3] # bs 2048", "self.bn1(out) out = self.prelu(out) out = self.conv2(out) out = self.bn2(out) if self.use_se: out", "class IRBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): super(IRBlock,", "6, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(args, **kwargs): model", "bias=False) self.bn1 = nn.BatchNorm2d(64) self.prelu = nn.PReLU() self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) self.layer1 =", "# bs 128 w/8 h/8 c = self.conv3(torch.cat((g, f[1]), 1)) c = self.bn3(c)", "args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model class ArcMarginModel(nn.Module): def __init__(self, args): super(ArcMarginModel, self).__init__() self.weight =", "nn.BatchNorm2d(512) self.dropout = nn.Dropout() if im_size == 112: self.fc = nn.Linear(512 * 7", 
"self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x)", "1] x = x.view(-1, 2048) # [N, 2048] x = self.fc(x) reg =", "None: residual = self.downsample(x) out += residual out = self.relu(out) return out class", "args): super(EastModel, self).__init__() if args.network == 'r18': self.resnet = resnet18(args) elif args.network ==", "self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out", "= torch.where(cosine > self.th, phi, cosine - self.mm) one_hot = torch.zeros(cosine.size(), device=device) one_hot.scatter_(1,", "nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def", "13:17]) return reg, expression, gender, glasses, race class FaceExpressionModel(nn.Module): def __init__(self): super(FaceExpressionModel, self).__init__()", "'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152':", "= self.conv7(h) # bs 32 w/4 h/4 g = self.bn7(g) g = self.relu(g)", "w/4 h/4 g = self.bn7(g) g = self.relu(g) score = self.conv8(g) # bs", "self.layer4(x) x = self.bn2(x) x = self.dropout(x) x = x.view(x.size(0), -1) x =", "self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 =", "2048 w/16 h/16 c = self.conv1(torch.cat((g, f[2]), 1)) c = self.bn1(c) c =", "self.conv7(h) # bs 32 w/4 h/4 g = self.bn7(g) g = self.relu(g) score", "= ResNet(IRBlock, [3, 4, 23, 3], use_se=args.use_se, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model", "Remove linear and pool layers (since we're not doing classification) modules = 
list(resnet.children())[:-1]", "= self.fc(x) x = self.bn3(x) return x def resnet18(args, **kwargs): model = ResNet(IRBlock,", "self.use_se: out = self.se(out) if self.downsample is not None: residual = self.downsample(x) out", "_make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or", "bs 32 w/4 h/4 h = self.bn6(h) h = self.relu(h) g = self.conv7(h)", "stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out)", "6, 3], use_se=args.use_se, im_size=args.im_size, **kwargs) if args.pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(args, **kwargs):", "self.unpool2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.unpool3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) def forward(self, images):", "x out = self.bn0(x) out = self.conv1(out) out = self.bn1(out) out = self.prelu(out)", "out = self.bn1(out) out = self.prelu(out) out = self.conv2(out) out = self.bn2(out) if", "!= 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes", "for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d) or isinstance(m," ]
[ "* from data_pipe_test.verify_index import * def test_log2(): print() assert integer_log2(1) == 0 assert", "test_log2(): print() assert integer_log2(1) == 0 assert integer_log2(2) == 1 assert integer_log2(4) ==", "assert integer_log2(4) == 2 assert integer_log2(8) == 3 def test_index_store(): index_store = BasicIndex()", "integer_log2(1) == 0 assert integer_log2(2) == 1 assert integer_log2(4) == 2 assert integer_log2(8)", "== 1 assert integer_log2(4) == 2 assert integer_log2(8) == 3 def test_index_store(): index_store", "data_pipe_test.verify_index import * def test_log2(): print() assert integer_log2(1) == 0 assert integer_log2(2) ==", "integer_log2(4) == 2 assert integer_log2(8) == 3 def test_index_store(): index_store = BasicIndex() verify_index(index_store)", "* def test_log2(): print() assert integer_log2(1) == 0 assert integer_log2(2) == 1 assert", "print() assert integer_log2(1) == 0 assert integer_log2(2) == 1 assert integer_log2(4) == 2", "import * from data_pipe_test.verify_index import * def test_log2(): print() assert integer_log2(1) == 0", "== 0 assert integer_log2(2) == 1 assert integer_log2(4) == 2 assert integer_log2(8) ==", "0 assert integer_log2(2) == 1 assert integer_log2(4) == 2 assert integer_log2(8) == 3", "\"\"\" from data_pipe.basic_index import * from data_pipe_test.verify_index import * def test_log2(): print() assert", "import * def test_log2(): print() assert integer_log2(1) == 0 assert integer_log2(2) == 1", "assert integer_log2(2) == 1 assert integer_log2(4) == 2 assert integer_log2(8) == 3 def", "from data_pipe.basic_index import * from data_pipe_test.verify_index import * def test_log2(): print() assert integer_log2(1)", "integer_log2(2) == 1 assert integer_log2(4) == 2 assert integer_log2(8) == 3 def test_index_store():", "data_pipe.basic_index import * from data_pipe_test.verify_index import * def test_log2(): print() assert integer_log2(1) ==", "\"\"\" \"\"\" from data_pipe.basic_index import * from data_pipe_test.verify_index 
import * def test_log2(): print()", "def test_log2(): print() assert integer_log2(1) == 0 assert integer_log2(2) == 1 assert integer_log2(4)", "1 assert integer_log2(4) == 2 assert integer_log2(8) == 3 def test_index_store(): index_store =", "from data_pipe_test.verify_index import * def test_log2(): print() assert integer_log2(1) == 0 assert integer_log2(2)", "assert integer_log2(1) == 0 assert integer_log2(2) == 1 assert integer_log2(4) == 2 assert" ]
[ "}) return results def station(self, id): station = self._query('station/%s' % id) if station", "r.status_code != 204: return r.json() else: sleep(0.25) except Exception as e: logger.info('RadioWorld API", "self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id): results = [] search = self._query(\"stations/search/{}\".format(q))", "location_id): results = [] search = self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id,", "time import sleep from contextlib import closing import logging logger = logging.getLogger(__name__) class", "self._query('station/%s' % id) if station is None: logger.warning(\"empty response from API\") return station", "for location in sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id':", "}] countries = self._query('countries') if countries is not None: for location in sorted(countries,", "'schema': 'location', 'id': location['id'], 'text': location['name'] }) return results def rnd(self): station =", "from API\") return [] results = [] for station in sorted(stations, key=lambda sta:", "} def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations is None or stations", "id): station = self._query('station/%s' % id) if station is None: logger.warning(\"empty response from", "station = self._query('station/%s' % id) if station is None: logger.warning(\"empty response from API\")", "station['id'], 'text': station['text'] }) return results def _query(self, path): uri = (self._base_uri %", "try: while True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if", "return r.json() else: sleep(0.25) except Exception as e: logger.info('RadioWorld API request for %s", "key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'], 
'text': location['name'] })", "}) return results def rnd(self): station = self._query('station/rnd') if station is None: return", "key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] })", "id) if station is None: logger.warning(\"empty response from API\") return station def image(self,", "None: for location in sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location',", "sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results", "'location', 'id': location['id'], 'text': location['name'] }) return results def rnd(self): station = self._query('station/rnd')", "e: logger.info('RadioWorld API request for %s failed: %s' % (path, e)) return None", "else: sleep(0.25) except Exception as e: logger.info('RadioWorld API request for %s failed: %s'", "'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results def station(self,", "= requests.Session() def root(self): results = [{ 'type': 'directory', 'schema': 'rnd', 'id': None,", "results def _query(self, path): uri = (self._base_uri % path) logger.info('RadioWorld request: %s', uri)", "'id': station['id'], 'text': station['text'] }) return results def station(self, id): station = self._query('station/%s'", "station def image(self, id): return self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id): results", "else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station in stations: results.append({ 'type': 'track',", "if countries is not None: for location in sorted(countries, key=lambda loc: loc['name']): results.append({", "self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station", "in 
sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'], 'text':", "def station(self, id): station = self._query('station/%s' % id) if station is None: logger.warning(\"empty", "is None: return [] return { 'type': 'track', 'schema': 'station', 'id': station['id'], 'text':", "as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code != 204: return r.json()", "__init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def root(self): results", "'Feeling lucky' }] countries = self._query('countries') if countries is not None: for location", "countries is not None: for location in sorted(countries, key=lambda loc: loc['name']): results.append({ 'type':", "id): stations = self._query('location/{}/stations'.format(id)) if stations is None or stations == []: logger.warning(\"empty", "logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session =", "if station is None: return [] return { 'type': 'track', 'schema': 'station', 'id':", "r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code != 204: return r.json() else: sleep(0.25)", "r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code != 204: return r.json() else:", "None: logger.warning(\"empty response from API\") return station def image(self, id): return self._base_uri %", "as e: logger.info('RadioWorld API request for %s failed: %s' % (path, e)) return", "[]: logger.warning(\"empty response from API\") return [] results = [] for station in", "station = self._query('station/rnd') if station is None: return [] return { 'type': 'track',", "logger.debug(\"RadioWorld response: %s\", 
r.status_code) if r.status_code != 204: return r.json() else: sleep(0.25) except", "search(self, q, location_id): results = [] search = self._query(\"stations/search/{}\".format(q)) if location_id is None", "'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results def station(self, id): station", "'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling lucky' }] countries = self._query('countries') if", "logging logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri =", "None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station in stations: results.append({ 'type':", "import closing import logging logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri =", "'directory', 'schema': 'location', 'id': location['id'], 'text': location['name'] }) return results def rnd(self): station", "uri) try: while True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code)", "= (self._base_uri % path) logger.info('RadioWorld request: %s', uri) try: while True: with closing(self._session.get(uri))", "import requests from time import sleep from contextlib import closing import logging logger", "RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def", "return [] results = [] for station in sorted(stations, key=lambda sta: sta['text']): results.append({", "'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results def _query(self, path):", "'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results def _query(self,", "results.append({ 'type': 'track', 'schema': 'station', 
'id': station['id'], 'text': station['text'] }) return results def", "station(self, id): station = self._query('station/%s' % id) if station is None: logger.warning(\"empty response", "'text': station['text'] }) return results def _query(self, path): uri = (self._base_uri % path)", "== []: logger.warning(\"empty response from API\") return [] results = [] for station", "'id': station['id'], 'text': station['text'] }) return results def _query(self, path): uri = (self._base_uri", "stations == []: logger.warning(\"empty response from API\") return [] results = [] for", "closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code != 204: return", "q, location_id): results = [] search = self._query(\"stations/search/{}\".format(q)) if location_id is None else", "in stations: results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return", "% (\"station/{}/image\".format(id)) def search(self, q, location_id): results = [] search = self._query(\"stations/search/{}\".format(q)) if", "location['id'], 'text': location['name'] }) return results def rnd(self): station = self._query('station/rnd') if station", "(self._base_uri % path) logger.info('RadioWorld request: %s', uri) try: while True: with closing(self._session.get(uri)) as", "def rnd(self): station = self._query('station/rnd') if station is None: return [] return {", "None or stations == []: logger.warning(\"empty response from API\") return [] results =", "'rnd', 'id': None, 'text': 'Feeling lucky' }] countries = self._query('countries') if countries is", "'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def root(self): results = [{ 'type':", "= [] for station in sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema':", "q)) stations = search['stations'] for station in stations: 
results.append({ 'type': 'track', 'schema': 'station',", "= [{ 'type': 'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling lucky' }] countries", "return { 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] } def stations(self,", "for station in stations: results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text']", "station in stations: results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] })", "sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'], 'text': location['name']", "'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] } def stations(self, id): stations =", "for station in sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id':", "station['text'] } def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations is None or", "in sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text':", "sleep(0.25) except Exception as e: logger.info('RadioWorld API request for %s failed: %s' %", "def _query(self, path): uri = (self._base_uri % path) logger.info('RadioWorld request: %s', uri) try:", "r.json() else: sleep(0.25) except Exception as e: logger.info('RadioWorld API request for %s failed:", "= self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for", "}) return results def _query(self, path): uri = (self._base_uri % path) logger.info('RadioWorld request:", "'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results def station(self, id):", "station['text'] }) return results def _query(self, path): uri = (self._base_uri % path) 
logger.info('RadioWorld", "_query(self, path): uri = (self._base_uri % path) logger.info('RadioWorld request: %s', uri) try: while", "path): uri = (self._base_uri % path) logger.info('RadioWorld request: %s', uri) try: while True:", "'station', 'id': station['id'], 'text': station['text'] }) return results def _query(self, path): uri =", "%s\", r.status_code) if r.status_code != 204: return r.json() else: sleep(0.25) except Exception as", "(\"station/{}/image\".format(id)) def search(self, q, location_id): results = [] search = self._query(\"stations/search/{}\".format(q)) if location_id", "except Exception as e: logger.info('RadioWorld API request for %s failed: %s' % (path,", "id): return self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id): results = [] search", "def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def root(self):", "% id) if station is None: logger.warning(\"empty response from API\") return station def", "closing import logging logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s'", "import logging logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri", "'text': station['text'] }) return results def station(self, id): station = self._query('station/%s' % id)", "return self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id): results = [] search =", "logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s'", "if stations is None or stations == []: logger.warning(\"empty response from API\") return", "def image(self, id): return 
self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id): results =", "self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def root(self): results =", "= 'http://localhost:5000/%s' self._session = requests.Session() def root(self): results = [{ 'type': 'directory', 'schema':", "return results def station(self, id): station = self._query('station/%s' % id) if station is", "is None: logger.warning(\"empty response from API\") return station def image(self, id): return self._base_uri", "= logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session", "class RadioWorld(object): def __init__(self): self._base_uri = 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session()", "results = [{ 'type': 'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling lucky' }]", "'type': 'directory', 'schema': 'location', 'id': location['id'], 'text': location['name'] }) return results def rnd(self):", "station['text'] }) return results def station(self, id): station = self._query('station/%s' % id) if", "from contextlib import closing import logging logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self):", "stations = self._query('location/{}/stations'.format(id)) if stations is None or stations == []: logger.warning(\"empty response", "contextlib import closing import logging logger = logging.getLogger(__name__) class RadioWorld(object): def __init__(self): self._base_uri", "[] for station in sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station',", "station is None: logger.warning(\"empty response from API\") return station def image(self, id): return", "'id': None, 'text': 
'Feeling lucky' }] countries = self._query('countries') if countries is not", "station is None: return [] return { 'type': 'track', 'schema': 'station', 'id': station['id'],", "'station', 'id': station['id'], 'text': station['text'] }) return results def station(self, id): station =", "'text': 'Feeling lucky' }] countries = self._query('countries') if countries is not None: for", "stations = search['stations'] for station in stations: results.append({ 'type': 'track', 'schema': 'station', 'id':", "'id': station['id'], 'text': station['text'] } def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations", "not None: for location in sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema':", "'http://localhost:5000/%s' self._session = requests.Session() def root(self): results = [{ 'type': 'directory', 'schema': 'rnd',", "'text': location['name'] }) return results def rnd(self): station = self._query('station/rnd') if station is", "is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station in stations: results.append({", "return results def _query(self, path): uri = (self._base_uri % path) logger.info('RadioWorld request: %s',", "while True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code", "def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations is None or stations ==", "= [] search = self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations", "{ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] } def stations(self, id):", "with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code != 204:", "station in 
sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'],", "return station def image(self, id): return self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id):", "= self._query('location/{}/stations'.format(id)) if stations is None or stations == []: logger.warning(\"empty response from", "'id': location['id'], 'text': location['name'] }) return results def rnd(self): station = self._query('station/rnd') if", "results = [] search = self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q))", "return results def rnd(self): station = self._query('station/rnd') if station is None: return []", "sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text']", "'station', 'id': station['id'], 'text': station['text'] } def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if", "'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] } def stations(self, id): stations", "self._session = requests.Session() def root(self): results = [{ 'type': 'directory', 'schema': 'rnd', 'id':", "[] search = self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations =", "response: %s\", r.status_code) if r.status_code != 204: return r.json() else: sleep(0.25) except Exception", "search['stations'] for station in stations: results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text':", "self._query('countries') if countries is not None: for location in sorted(countries, key=lambda loc: loc['name']):", "location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station in stations:", "logger.warning(\"empty response from 
API\") return [] results = [] for station in sorted(stations,", "!= 204: return r.json() else: sleep(0.25) except Exception as e: logger.info('RadioWorld API request", "response from API\") return station def image(self, id): return self._base_uri % (\"station/{}/image\".format(id)) def", "[] results = [] for station in sorted(stations, key=lambda sta: sta['text']): results.append({ 'type':", "'schema': 'rnd', 'id': None, 'text': 'Feeling lucky' }] countries = self._query('countries') if countries", "if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station in", "response from API\") return [] results = [] for station in sorted(stations, key=lambda", "rnd(self): station = self._query('station/rnd') if station is None: return [] return { 'type':", "= self._query('station/rnd') if station is None: return [] return { 'type': 'track', 'schema':", "results def station(self, id): station = self._query('station/%s' % id) if station is None:", "API\") return station def image(self, id): return self._base_uri % (\"station/{}/image\".format(id)) def search(self, q,", "logger.info('RadioWorld request: %s', uri) try: while True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld", "countries = self._query('countries') if countries is not None: for location in sorted(countries, key=lambda", "search = self._query(\"stations/search/{}\".format(q)) if location_id is None else self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations']", "= search['stations'] for station in stations: results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'],", "loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'], 'text': location['name'] }) return", "uri = (self._base_uri % path) logger.info('RadioWorld request: %s', uri) try: while True: with", "'schema': 'station', 'id': station['id'], 
'text': station['text'] }) return results def _query(self, path): uri", "requests.Session() def root(self): results = [{ 'type': 'directory', 'schema': 'rnd', 'id': None, 'text':", "is None or stations == []: logger.warning(\"empty response from API\") return [] results", "sta: sta['text']): results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return", "import sleep from contextlib import closing import logging logger = logging.getLogger(__name__) class RadioWorld(object):", "loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'], 'text': location['name'] }) return results", "location in sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'],", "return [] return { 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }", "if station is None: logger.warning(\"empty response from API\") return station def image(self, id):", "204: return r.json() else: sleep(0.25) except Exception as e: logger.info('RadioWorld API request for", "<filename>mopidy_radioworld/radioworld.py import requests from time import sleep from contextlib import closing import logging", "def search(self, q, location_id): results = [] search = self._query(\"stations/search/{}\".format(q)) if location_id is", "%s', uri) try: while True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\",", "results def rnd(self): station = self._query('station/rnd') if station is None: return [] return", "def root(self): results = [{ 'type': 'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling", "location['name'] }) return results def rnd(self): station = self._query('station/rnd') if station is None:", "'text': station['text'] } def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations is None", "if r.status_code != 204: return 
r.json() else: sleep(0.25) except Exception as e: logger.info('RadioWorld", "request: %s', uri) try: while True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response:", "None, 'text': 'Feeling lucky' }] countries = self._query('countries') if countries is not None:", "= 'https://radioworld-api-prod.azurewebsites.net/%s' #self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def root(self): results = [{", "station['id'], 'text': station['text'] }) return results def station(self, id): station = self._query('station/%s' %", "True: with closing(self._session.get(uri)) as r: r.raise_for_status() logger.debug(\"RadioWorld response: %s\", r.status_code) if r.status_code !=", "'schema': 'station', 'id': station['id'], 'text': station['text'] } def stations(self, id): stations = self._query('location/{}/stations'.format(id))", "path) logger.info('RadioWorld request: %s', uri) try: while True: with closing(self._session.get(uri)) as r: r.raise_for_status()", "#self._base_uri = 'http://localhost:5000/%s' self._session = requests.Session() def root(self): results = [{ 'type': 'directory',", "self._query('location/{}/stations'.format(id)) if stations is None or stations == []: logger.warning(\"empty response from API\")", "r.status_code) if r.status_code != 204: return r.json() else: sleep(0.25) except Exception as e:", "results = [] for station in sorted(stations, key=lambda sta: sta['text']): results.append({ 'type': 'track',", "sleep from contextlib import closing import logging logger = logging.getLogger(__name__) class RadioWorld(object): def", "stations is None or stations == []: logger.warning(\"empty response from API\") return []", "stations: results.append({ 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] }) return results", "= self._query('station/%s' % id) if station is None: logger.warning(\"empty response from API\") return", "None: return [] return { 'type': 
'track', 'schema': 'station', 'id': station['id'], 'text': station['text']", "self._query('station/rnd') if station is None: return [] return { 'type': 'track', 'schema': 'station',", "station['id'], 'text': station['text'] } def stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations is", "is not None: for location in sorted(countries, key=lambda loc: loc['name']): results.append({ 'type': 'directory',", "self._query(\"location/{}/search/{}\".format(location_id, q)) stations = search['stations'] for station in stations: results.append({ 'type': 'track', 'schema':", "'type': 'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling lucky' }] countries = self._query('countries')", "[] return { 'type': 'track', 'schema': 'station', 'id': station['id'], 'text': station['text'] } def", "% path) logger.info('RadioWorld request: %s', uri) try: while True: with closing(self._session.get(uri)) as r:", "requests from time import sleep from contextlib import closing import logging logger =", "logger.warning(\"empty response from API\") return station def image(self, id): return self._base_uri % (\"station/{}/image\".format(id))", "[{ 'type': 'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling lucky' }] countries =", "Exception as e: logger.info('RadioWorld API request for %s failed: %s' % (path, e))", "image(self, id): return self._base_uri % (\"station/{}/image\".format(id)) def search(self, q, location_id): results = []", "or stations == []: logger.warning(\"empty response from API\") return [] results = []", "results.append({ 'type': 'directory', 'schema': 'location', 'id': location['id'], 'text': location['name'] }) return results def", "= self._query('countries') if countries is not None: for location in sorted(countries, key=lambda loc:", "stations(self, id): stations = self._query('location/{}/stations'.format(id)) if stations is None or stations == []:", "API\") return [] results = [] for station in sorted(stations, 
key=lambda sta: sta['text']):", "lucky' }] countries = self._query('countries') if countries is not None: for location in", "root(self): results = [{ 'type': 'directory', 'schema': 'rnd', 'id': None, 'text': 'Feeling lucky'", "from time import sleep from contextlib import closing import logging logger = logging.getLogger(__name__)", "from API\") return station def image(self, id): return self._base_uri % (\"station/{}/image\".format(id)) def search(self," ]
[ "getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr)", "exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for", "if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip", "('-d' in opt): getdead() elif ('h' in opt): usage() except: logging.getLogger(__name__).warning('ERROR:Illegal IP') usage()", "ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive()", "== ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive() getdead() else: for", "if ('-l' in opt): getlive() elif ('-d' in opt): getdead() elif ('h' in", "import socket import sys, getopt def getlive(): for i in live: print('live {0}'.format(i[0].dst))", "By <NAME>--') exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try:", "= getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip in ip_range: if", "import logging logging.getLogger('scapy.runtime').setLevel(logging.ERROR) from scapy.all import * from netaddr import * import socket", "import * import socket import sys, getopt def getlive(): for i in live:", "if str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network or ip == ip_range.broadcast: continue", "opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip in", "verbose=0) if not opts: getlive() getdead() else: for opt in opts: if ('-l'", "if not opts: getlive() getdead() else: for opt in opts: if ('-l' in", "getdead(): for j in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] 
<ip>\\n\\t-l", "<ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args", "hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try:", "if ip == ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if", "{0}'.format(i[0].dst)) def getdead(): for j in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper", "timeout=1, verbose=0) if not opts: getlive() getdead() else: for opt in opts: if", "in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d", "def getdead(): for j in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options]", "try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr) !=", "opt): getlive() elif ('-d' in opt): getdead() elif ('h' in opt): usage() except:", "elif ('-d' in opt): getdead() elif ('h' in opt): usage() except: logging.getLogger(__name__).warning('ERROR:Illegal IP')", "usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--')", "[options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts,", "* import socket import sys, getopt def getlive(): for i in live: print('live", "def getlive(): for i in live: print('live {0}'.format(i[0].dst)) def getdead(): for j in", "ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive() getdead() else:", "j in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live", "show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args = 
getopt.getopt(sys.argv[1:],'ldh') try: target=args[0]", "from scapy.all import * from netaddr import * import socket import sys, getopt", "pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1):", "in live: print('live {0}'.format(i[0].dst)) def getdead(): for j in dead: print('dead {0}'.format(j[0].dst)) def", "target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32':", "import sys, getopt def getlive(): for i in live: print('live {0}'.format(i[0].dst)) def getdead():", "print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead", "usage() try: ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip", "in opts: if ('-l' in opt): getlive() elif ('-d' in opt): getdead() elif", "not opts: getlive() getdead() else: for opt in opts: if ('-l' in opt):", "getlive() elif ('-d' in opt): getdead() elif ('h' in opt): usage() except: logging.getLogger(__name__).warning('ERROR:Illegal", "logging logging.getLogger('scapy.runtime').setLevel(logging.ERROR) from scapy.all import * from netaddr import * import socket import", "for i in live: print('live {0}'.format(i[0].dst)) def getdead(): for j in dead: print('dead", "import * from netaddr import * import socket import sys, getopt def getlive():", "!= str(ip)+'/32': if ip == ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1,", "show live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args =", "#!/usr/bin/python import logging logging.getLogger('scapy.runtime').setLevel(logging.ERROR) from scapy.all import * from netaddr import * import", "hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: 
usage()", "for j in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show", "== ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts:", "scapy.all import * from netaddr import * import socket import sys, getopt def", "or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive() getdead()", "opt in opts: if ('-l' in opt): getlive() elif ('-d' in opt): getdead()", "from netaddr import * import socket import sys, getopt def getlive(): for i", "in opt): getlive() elif ('-d' in opt): getdead() elif ('h' in opt): usage()", "str(ip)+'/32': if ip == ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0)", "ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network or ip == ip_range.broadcast:", "getopt def getlive(): for i in live: print('live {0}'.format(i[0].dst)) def getdead(): for j", "getdead() else: for opt in opts: if ('-l' in opt): getlive() elif ('-d'", "dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except:", "('-l' in opt): getlive() elif ('-d' in opt): getdead() elif ('h' in opt):", "for ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network or", "ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network or ip", "in opt): getdead() elif ('h' in opt): usage() except: logging.getLogger(__name__).warning('ERROR:Illegal IP') usage() else:", "getlive() getdead() else: for opt in opts: if ('-l' in opt): getlive() elif", "<NAME>--') exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target)", "i in live: print('live {0}'.format(i[0].dst)) def getdead(): for j in dead: print('dead 
{0}'.format(j[0].dst))", "continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive() getdead() else: for opt in", "in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network or ip ==", "{0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development", "ip == ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not", "live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive() getdead() else: for opt in opts:", "ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(), timeout=1, verbose=0) if not opts: getlive() getdead() else: for opt", "print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit()", "try: ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip ==", "except: usage() try: ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if", "def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show dead hosts\\n\\n--Development By", "live hosts\\n\\t-d show dead hosts\\n\\n--Development By <NAME>--') exit() if(len(sys.argv)>1): opts, args = getopt.getopt(sys.argv[1:],'ldh')", "str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network or ip == ip_range.broadcast: continue live,dead=sr(IP(dst=str(ip))/ICMP(),", "print('live {0}'.format(i[0].dst)) def getdead(): for j in dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage:", "socket import sys, getopt def getlive(): for i in live: print('live {0}'.format(i[0].dst)) def", "dead: print('dead {0}'.format(j[0].dst)) def usage(): print('Usage: pingsweeper [options] <ip>\\n\\t-l show live hosts\\n\\t-d show", "else: for opt in opts: if ('-l' in opt): getlive() elif ('-d' in", 
"logging.getLogger('scapy.runtime').setLevel(logging.ERROR) from scapy.all import * from netaddr import * import socket import sys,", "for opt in opts: if ('-l' in opt): getlive() elif ('-d' in opt):", "netaddr import * import socket import sys, getopt def getlive(): for i in", "getlive(): for i in live: print('live {0}'.format(i[0].dst)) def getdead(): for j in dead:", "opts: if ('-l' in opt): getlive() elif ('-d' in opt): getdead() elif ('h'", "args = getopt.getopt(sys.argv[1:],'ldh') try: target=args[0] except: usage() try: ip_range=IPNetwork(target) for ip in ip_range:", "opt): getdead() elif ('h' in opt): usage() except: logging.getLogger(__name__).warning('ERROR:Illegal IP') usage() else: usage()", "* from netaddr import * import socket import sys, getopt def getlive(): for", "sys, getopt def getlive(): for i in live: print('live {0}'.format(i[0].dst)) def getdead(): for", "opts: getlive() getdead() else: for opt in opts: if ('-l' in opt): getlive()", "ip_range=IPNetwork(target) for ip in ip_range: if str(ip_range.cidr) != str(ip)+'/32': if ip == ip_range.network", "<filename>pingsweeper.py #!/usr/bin/python import logging logging.getLogger('scapy.runtime').setLevel(logging.ERROR) from scapy.all import * from netaddr import *", "live: print('live {0}'.format(i[0].dst)) def getdead(): for j in dead: print('dead {0}'.format(j[0].dst)) def usage():" ]
[ "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc =", "\"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and", "parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30,", "governing permissions and # limitations under the License. # # pylint: disable=not-callable, line-too-long,", "\"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild", "result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start)))", "subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode == 0: end = time.time()", "/ \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api)", "default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel, default is", "= subprocess.run(command) start = time.time() if result.returncode == 0: if args.target == \"all\":", "= Path() cxx: Path = Path() if args.arch == \"aarch64\": cc = toolchain", "this file except in compliance with the License. # You may obtain a", "specific language governing permissions and # limitations under the License. 
# # pylint:", "from pathlib import Path def format_time(seconds): minute, sec = divmod(seconds, 60) hour, minute", "targets such as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\")", "0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command =", "help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for", "help=\"run N jobs in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets", "ANY KIND, either express or implied. # See the License for the specific", "in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as aapt2", "Lzhiyong # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "argparse import subprocess from pathlib import Path def format_time(seconds): minute, sec = divmod(seconds,", "> 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time() if result.returncode == 0:", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "success cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk = Path(args.ndk) if not", "/ \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api)", "if args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result", "str(cxx), args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain", "cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" /", "ValueError(\"error: cannot find the clang compiler\") # start building build(str(cc), str(cxx), args) def", "\"-C\", args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)])", "args.target, \"-j {}\".format(args.job)]) if result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild success cost", "choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set", "{}\\033[0m\".format(format_time(end - start))) def configure(args): ndk = Path(args.ndk) if not ndk.exists() or not", "is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as aapt2 adb fastboot, etc\")", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc", "/ \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain", "required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", 
\"x86\", \"x86_64\"], required=True, help=\"build", "python # # Copyright © 2022 Github Lzhiyong # # Licensed under the", "under the License. # # pylint: disable=not-callable, line-too-long, no-else-return import time import argparse", "# # pylint: disable=not-callable, line-too-long, no-else-return import time import argparse import subprocess from", "ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the specified", "OF ANY KIND, either express or implied. # See the License for the", "\"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain /", "not cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error:", "= ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path = Path() if", "cxx: Path = Path() if args.arch == \"aarch64\": cc = toolchain / \"bin\"", "toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else:", "\"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path = Path() if args.arch == \"aarch64\":", "platform level, min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16,", "help=\"build specified targets such as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host", "minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args):", "result = subprocess.run(command) start = time.time() if result.returncode == 0: if args.target ==", "# Copyright © 2022 Github Lzhiyong # # Licensed under the Apache License,", "limitations under the License. 
# # pylint: disable=not-callable, line-too-long, no-else-return import time import", "== 0: if args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)])", "= argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\",", "result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode == 0: end", "\"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\",", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "the clang compiler\") # start building build(str(cc), str(cxx), args) def main(): parser =", "is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find the clang compiler\") #", "# # Copyright © 2022 Github Lzhiyong # # Licensed under the Apache", "== \"aarch64\": cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain /", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "configure(args): ndk = Path(args.ndk) if not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find", "time import argparse import subprocess from pathlib import Path def format_time(seconds): minute, sec", "import subprocess from pathlib import Path def format_time(seconds): minute, sec = divmod(seconds, 60)", "time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk = Path(args.ndk)", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "find the clang compiler\") # start building build(str(cc), str(cxx), args) def main(): 
parser", "or not ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\"", "subprocess.run(command) start = time.time() if result.returncode == 0: if args.target == \"all\": result", "divmod(seconds, 60) hour, minute = divmod(minute, 60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour,", "cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" /", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# start building build(str(cc), str(cxx), args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True,", "compiler\") # start building build(str(cc), str(cxx), args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\",", "start building build(str(cc), str(cxx), args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set", "required by applicable law or agreed to in writing, software # distributed under", "if not cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise", "applicable law or agreed to in writing, software # distributed under the License", "pylint: disable=not-callable, line-too-long, no-else-return import time import argparse import subprocess from pathlib import", "return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc),", "\"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and len(str(args.protoc)) > 0:", "subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j", "default=\"all\", help=\"build specified targets such as aapt2 adb 
fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the", "args.arch == \"aarch64\": cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain", "args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\")", "or agreed to in writing, software # distributed under the License is distributed", "= Path(args.ndk) if not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find the ndk\")", "parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True,", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "= Path() if args.arch == \"aarch64\": cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api)", "help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level, min api", "find the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx:", "if not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain =", "as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "{}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find the clang compiler\") # start", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "\"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx))", "License. 
# You may obtain a copy of the License at # #", "cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk = Path(args.ndk) if not ndk.exists()", "compliance with the License. # You may obtain a copy of the License", "cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc =", "args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if", "parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as aapt2 adb", "Copyright © 2022 Github Lzhiyong # # Licensed under the Apache License, Version", "# pylint: disable=not-callable, line-too-long, no-else-return import time import argparse import subprocess from pathlib", "/ \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path = Path() if args.arch ==", "fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args = parser.parse_args() configure(args) if", "help=\"set android platform level, min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\")", "time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk = Path(args.ndk) if not ndk.exists() or", "toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api)", "\"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and len(str(args.protoc))", "def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\",", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by 
applicable", "not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find the", "disable=not-callable, line-too-long, no-else-return import time import argparse import subprocess from pathlib import Path", "\"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch ==", "not use this file except in compliance with the License. # You may", "parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build", "== \"arm\": cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain /", "cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find the clang", "\"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc =", "60) hour, minute = divmod(minute, 60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute,", "def build(cc, cxx, args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch),", "is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in", "specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level, min api is 30\") parser.add_argument(\"--build\",", "License, Version 2.0 (the \"License\"); # you may not use this file except", "jobs in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as", "def format_time(seconds): minute, sec = divmod(seconds, 60) hour, minute = divmod(minute, 60) if", 
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "if args.protoc is not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command)", "sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc,", "/ \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc = toolchain / \"bin\"", "License. # # pylint: disable=not-callable, line-too-long, no-else-return import time import argparse import subprocess", "# you may not use this file except in compliance with the License.", "= toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc = toolchain", "= time.time() if result.returncode == 0: if args.target == \"all\": result = subprocess.run([\"ninja\",", "agreed to in writing, software # distributed under the License is distributed on", "\"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx", "0: if args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else:", "= toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api)", "args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild success", "(the \"License\"); # you may not use this file except in compliance with", "the host protoc path\") args = parser.parse_args() configure(args) if __name__ == \"__main__\": main()", "\"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists()", "\"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / 
\"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not", "# Unless required by applicable law or agreed to in writing, software #", "etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args = parser.parse_args() configure(args) if __name__", "android platform level, min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\",", "by applicable law or agreed to in writing, software # distributed under the", "help=\"set the host protoc path\") args = parser.parse_args() configure(args) if __name__ == \"__main__\":", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute,", "\"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc", "> 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command", "sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command = [\"cmake\", \"-GNinja\", \"-B", "return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command = [\"cmake\",", "/ \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc", "\"x86_64\"], required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level,", "file except in compliance with the License. 
# You may obtain a copy", "import argparse import subprocess from pathlib import Path def format_time(seconds): minute, sec =", "elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx,", "\"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc))", "start))) def configure(args): ndk = Path(args.ndk) if not ndk.exists() or not ndk.is_dir(): raise", "not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time()", "License for the specific language governing permissions and # limitations under the License.", "else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build),", "to in writing, software # distributed under the License is distributed on an", "{}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode ==", "implied. 
# See the License for the specific language governing permissions and #", "ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path =", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "minute, sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def", "args.protoc is not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start", "not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain = ndk", "Path = Path() if args.arch == \"aarch64\": cc = toolchain / \"bin\" /", "\"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec) def build(cc, cxx, args): command = [\"cmake\", \"-GNinja\",", "args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc", "\"x86\": cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\"", "or implied. # See the License for the specific language governing permissions and", "\"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc = toolchain / \"bin\" /", "= subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target,", "cc: Path = Path() cxx: Path = Path() if args.arch == \"aarch64\": cc", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "if args.arch == \"aarch64\": cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx =", "in writing, software # distributed under the License is distributed on an \"AS", "minute, sec = divmod(seconds, 60) hour, minute = divmod(minute, 60) if hour >", "import time import argparse import subprocess from pathlib import Path def format_time(seconds): minute,", "permissions and # limitations under the License. # # pylint: disable=not-callable, line-too-long, no-else-return", "© 2022 Github Lzhiyong # # Licensed under the Apache License, Version 2.0", "default=30, help=\"set android platform level, min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build", "result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk = Path(args.ndk) if", "{}\".format(args.job)]) if result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end", "args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result =", "pathlib import Path def format_time(seconds): minute, sec = divmod(seconds, 60) hour, minute =", "argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"],", "path\") 
parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\",", "the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path", "build(str(cc), str(cxx), args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk", "end = time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk", "toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc = toolchain /", "toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path = Path()", "the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "subprocess from pathlib import Path def format_time(seconds): minute, sec = divmod(seconds, 60) hour,", "ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain = ndk /", "\"{:.2f}s\".format(sec) def build(cc, cxx, args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx),", "\"arm\": cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\"", "parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args = parser.parse_args() configure(args) if __name__ ==", "len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time() if result.returncode ==", "not ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc:", "such as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args", "print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find the clang compiler\") # start building", "\"aarch64\": cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\"", "time.time() if result.returncode == 0: if args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\",", "clang compiler\") # start building build(str(cc), str(cxx), args) def main(): parser = argparse.ArgumentParser()", "default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as aapt2 adb fastboot,", "use this file except in compliance with the License. 
# You may obtain", "command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is", "adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args = parser.parse_args() configure(args)", "toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if", "parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\",", "for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level, min api is", "min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "is not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start =", "result.returncode == 0: if args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j", "2.0 (the \"License\"); # you may not use this file except in compliance", "main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the ndk toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\",", "import Path def format_time(seconds): minute, sec = divmod(seconds, 60) hour, minute = divmod(minute,", "for the specific language governing permissions and # limitations under the License. 
#", "16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\",", "specified targets such as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "[\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None", "# # Unless required by applicable law or agreed to in writing, software", "\"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx", "express or implied. # See the License for the specific language governing permissions", "sec = divmod(seconds, 60) hour, minute = divmod(minute, 60) if hour > 0:", "if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0: return", "either express or implied. 
# See the License for the specific language governing", "/ \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not", "/ \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is", "= toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api)", "toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif", "args.arch == \"arm\": cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "ValueError(\"cannot find the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path()", "30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel,", "raise ValueError(\"cannot find the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path =", "\"-j {}\".format(args.job)]) if result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild success cost time:", "/ \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain", "Github Lzhiyong # # Licensed under the Apache License, Version 2.0 (the \"License\");", "toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists(): print(\"cc is", "the License. 
# You may obtain a copy of the License at #", "/ \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc = toolchain / \"bin\"", "= [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "= toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api)", "\"i686-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain /", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "\"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result", "== 0: end = time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start))) def", "parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel, default", "= subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode == 0: end =", "parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such as aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set", "build(cc, cxx, args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), \"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"]", "with the License. 
# You may obtain a copy of the License at", "cxx = toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists():", "command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time() if result.returncode == 0: if args.target", "default=16, help=\"run N jobs in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "= toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\" /", "level, min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run", "the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level, min api is 30\")", "else: cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\"", "aapt2 adb fastboot, etc\") parser.add_argument(\"--protoc\", help=\"set the host protoc path\") args = parser.parse_args()", "if result.returncode == 0: if args.target == \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build,", "\"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode", "law or agreed to in writing, software # distributed under the License is", "0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time() if result.returncode == 0: if", "the License for the specific language governing permissions and # limitations under the", "no-else-return import time import argparse import subprocess from pathlib import Path def format_time(seconds):", "cxx, args): command = [\"cmake\", \"-GNinja\", \"-B {}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), 
\"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "\"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result =", "and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time() if result.returncode", "toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\": cc = toolchain /", "/ \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch", "ndk = Path(args.ndk) if not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find the", "directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\",", "- start))) def configure(args): ndk = Path(args.ndk) if not ndk.exists() or not ndk.is_dir():", "== \"all\": result = subprocess.run([\"ninja\", \"-C\", args.build, \"-j {}\".format(args.job)]) else: result = subprocess.run([\"ninja\",", "help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel, default is 16\")", "ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path = Path() cxx: Path = Path() if args.arch", "cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" /", "hour, minute = divmod(minute, 60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec)", "start = time.time() if result.returncode == 0: if args.target == \"all\": result =", "/ \"x86_64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or", "def 
configure(args): ndk = Path(args.ndk) if not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot", "divmod(minute, 60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute >", "in compliance with the License. # You may obtain a copy of the", "else: result = subprocess.run([\"ninja\", \"-C\", args.build, args.target, \"-j {}\".format(args.job)]) if result.returncode == 0:", "Path(args.ndk) if not ndk.exists() or not ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "args.arch == \"x86\": cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc = toolchain / \"bin\" /", "is {}\".format(cxx)) raise ValueError(\"error: cannot find the clang compiler\") # start building build(str(cc),", "0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else:", "See the License for the specific language governing permissions and # limitations under", "== \"x86\": cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx = toolchain /", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "/ \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api) elif args.arch == \"arm\":", "build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs in parallel, default is 16\") parser.add_argument(\"--target\",", "format_time(seconds): minute, sec = divmod(seconds, 60) hour, minute = divmod(minute, 60) if hour", "a copy 
of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find", "{}\".format(cxx)) raise ValueError(\"error: cannot find the clang compiler\") # start building build(str(cc), str(cxx),", "= toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\": cc = toolchain", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "\"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: return \"{:.2f}s\".format(sec)", "/ \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch", "toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif", "cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" /", "= toolchain / \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists(): print(\"cc", "\"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx =", "None and len(str(args.protoc)) > 0: command.append(\"-DPROTOC_PATH={}\".format(args.protoc)) result = subprocess.run(command) start = time.time() if", "architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level, min api is 30\") parser.add_argument(\"--build\", default=\"build\",", "minute = divmod(minute, 60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif", "return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec) else: 
return", "= divmod(minute, 60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute", "2022 Github Lzhiyong # # Licensed under the Apache License, Version 2.0 (the", "and # limitations under the License. # # pylint: disable=not-callable, line-too-long, no-else-return import", "cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot", "= divmod(seconds, 60) hour, minute = divmod(minute, 60) if hour > 0: return", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "\"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch ==", "parser.add_argument(\"--api\", default=30, help=\"set android platform level, min api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the", "Path def format_time(seconds): minute, sec = divmod(seconds, 60) hour, minute = divmod(minute, 60)", "line-too-long, no-else-return import time import argparse import subprocess from pathlib import Path def", "0: end = time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args):", "cannot find the clang compiler\") # start building build(str(cc), str(cxx), args) def main():", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "toolchain path\") parser.add_argument(\"--arch\", choices=[\"aarch64\", \"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the specified architecture\")", "> 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0: return \"{}m{:02d.2f}s\".format(minute, sec)", "#!/usr/bin/env python # # Copyright © 2022 Github Lzhiyong # # Licensed under", "ndk.is_dir(): raise ValueError(\"cannot find the ndk\") toolchain = ndk / \"toolchains/llvm/prebuilt/linux-x86_64\" cc: Path", "elif args.arch == \"x86\": cc = toolchain / \"bin\" / \"i686-linux-android{}-clang\".format(args.api) cxx =", "Path = Path() cxx: Path = Path() if args.arch == \"aarch64\": cc =", "N jobs in parallel, default is 16\") parser.add_argument(\"--target\", default=\"all\", help=\"build specified targets such", "raise ValueError(\"error: cannot find the clang compiler\") # start building build(str(cc), str(cxx), args)", "# limitations under the License. # # pylint: disable=not-callable, line-too-long, no-else-return import time", "= toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"aarch64-linux-android{}-clang++\".format(args.api)", "Path() cxx: Path = Path() if args.arch == \"aarch64\": cc = toolchain /", "/ \"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc))", "\"bin\" / \"x86_64-linux-android{}-clang++\".format(args.api) if not cc.exists() or not cxx.exists(): print(\"cc is {}\".format(cc)) print(\"cxx", "the License. # # pylint: disable=not-callable, line-too-long, no-else-return import time import argparse import", "60) if hour > 0: return \"{}h{:02d}m{:02d.2f}s\".format(hour, minute, sec) elif minute > 0:", "the specific language governing permissions and # limitations under the License. 
# #", "/ \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang++\".format(args.api) elif args.arch == \"x86\":", "print(\"cc is {}\".format(cc)) print(\"cxx is {}\".format(cxx)) raise ValueError(\"error: cannot find the clang compiler\")", "language governing permissions and # limitations under the License. # # pylint: disable=not-callable,", "cxx = toolchain / \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\"", "/ \"bin\" / \"i686-linux-android{}-clang++\".format(args.api) else: cc = toolchain / \"bin\" / \"x86_64-linux-android{}-clang\".format(args.api) cxx", "= time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end - start))) def configure(args): ndk =", "\"arm\", \"x86\", \"x86_64\"], required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "\"x86\", \"x86_64\"], required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform", "building build(str(cc), str(cxx), args) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"--ndk\", required=True, help=\"set the", "required=True, help=\"build for the specified architecture\") parser.add_argument(\"--api\", default=30, help=\"set android platform level, min", "elif args.arch == \"arm\": cc = toolchain / \"bin\" / \"armv7a-linux-androideabi{}-clang\".format(args.api) cxx =", "api is 30\") parser.add_argument(\"--build\", default=\"build\", help=\"the build directory\") parser.add_argument(\"--job\", default=16, help=\"run N jobs", "Path() if args.arch == \"aarch64\": cc = toolchain / \"bin\" / \"aarch64-linux-android{}-clang\".format(args.api) cxx", "{}\".format(args.build), \"-DCMAKE_C_COMPILER={}\".format(cc), 
\"-DCMAKE_CXX_COMPILER={}\".format(cxx), \"-DTARGET_ABI={}\".format(args.arch), \"-DCMAKE_BUILD_TYPE=Release\"] if args.protoc is not None and len(str(args.protoc)) >", "if result.returncode == 0: end = time.time() print(\"\\033[1;32mbuild success cost time: {}\\033[0m\".format(format_time(end -" ]
[ "_preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before", "}}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{", "predicts = model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\":", "predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with", "from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import", "model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data)", "f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def _test_model(): test_data", "with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def _test_model(): test_data =", "target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] =", "train_test_split from sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score", "\"thal\"], ), ] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True,", "train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ ( 
'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\",", "= \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split():", "] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with", "train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\",", "}}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f:", "import LogisticRegression import pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json from", "metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1),", "start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split", "from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from", "target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds", "{processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data =", "python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator(", 
"pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data", "dag_id=\"model_train\", description=\"This DAG trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag:", "as f: pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target =", "data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, )", "Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data)", "airflow.utils.dates import days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{", "with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This", "processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\")", "f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds", "dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data >> train_val_split >>", "Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split", "PythonOperator( task_id=\"train_model\", python_callable=_train_model, 
dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data", "synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag,", "\"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model = LogisticRegression()", "from airflow.utils.dates import days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df =", "from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates import days_ago def", "transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed", "airflow.operators.python import PythonOperator import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline", "\"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{", "import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import", "import pathlib from datetime import timedelta from airflow import DAG from airflow.operators.python import", "data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\",", "= test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{", "roc_auc_score(target, predicts), } 
pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as", "[ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat',", "[\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model =", "data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data,", "task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data >>", "}}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"]", "test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data >> train_val_split >> train_model >>", "\"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{", "OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model", "}}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics", "( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot',", "airflow import DAG from airflow.operators.python import PythonOperator import pandas as pd from sklearn.compose", "json from 
airflow.utils.dates import days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df", ") test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data >> train_val_split >> train_model", "index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8)", "data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path =", "from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from", "model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\",", "( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ]", "DAG from airflow.operators.python import PythonOperator import pandas as pd from sklearn.compose import ColumnTransformer", "sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection", "pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f)", "}}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target, predicts), \"f1\":", ") as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator(", "}}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) 
with DAG( dag_id=\"model_train\", description=\"This DAG trains model", "= LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\")", "test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds", "_train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer =", "\"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\",", "python_callable=_train_model, dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data >> train_val_split", "accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True)", "\"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data,", ") train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model,", "{ \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds", "model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\",", 
"LogisticRegression import pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates", "preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag", "test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\",", "ds }}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"]", "timedelta from airflow import DAG from airflow.operators.python import PythonOperator import pandas as pd", "sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model", "\"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target,", "f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def", "as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import", "target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer =", "roc_auc_score import json from airflow.utils.dates import days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds", "schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split =", "model.predict(test_data) metrics = 
{ \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts),", "days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\")", "pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target,", "python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model = PythonOperator(", "= PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag )", "from airflow import DAG from airflow.operators.python import PythonOperator import pandas as pd from", "pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder", "target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model,", "ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to {processed_path}\")", "processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def", "\"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\",", "f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True)", "= 
train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\",", "train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag", "), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ),", "pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{", "train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag", "def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{", "pathlib from datetime import timedelta from airflow import DAG from airflow.operators.python import PythonOperator", "\"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds", "ds }}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ ( 'num',", "to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data", "ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) with", "def 
_test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model", "pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file)", "import timedelta from airflow import DAG from airflow.operators.python import PythonOperator import pandas as", "= PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag ) preprocess_data >> train_val_split >> train_model >> test_model", "train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data", "= train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model():", "import json from airflow.utils.dates import days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\")", "= pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False)", "ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds", "'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]),", "\"w\") as metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG 
trains model on", "f: pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"]", "}}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path,", "sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates import days_ago def _preprocess_data():", "}}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\",", "transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics = {", "\"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data", "pandas as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing", "import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import", "= pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df", "description=\"This DAG trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data", "exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\",", "\"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": 
roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True,", "PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model", "dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split,", "{data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True,", "ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as", "exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds", "= PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag )", "= pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [", "}}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) with DAG(", "sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import pickle", "task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model =", "ds }}/data.csv\") target_df = 
pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True)", "= model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target,", "open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG", "}}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\")", "import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import", "train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False)", "model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f:", "pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\",", "predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\",", "import train_test_split from sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics import accuracy_score, f1_score,", "\"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with 
open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\")", "from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics import", "}}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer", "\"wb\") as f: pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target", "dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model = PythonOperator( task_id=\"test_model\",", ") train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model,", "\"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"],", "transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds", "ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds", "ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data)", "pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates import days_ago", "pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"],", "with DAG( 
dag_id=\"model_train\", description=\"This DAG trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), )", "sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing", ") transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{", "index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target", "as metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG trains model on synthetic", "= { \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{", "metrics = { \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts), \"roc_auc\": roc_auc_score(target, predicts), }", "ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler", "pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to", "open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\")", "print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\"", "def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds 
}}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer", "pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def _test_model():", "PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model", "data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True)", "OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression", "import PythonOperator import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import", "data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data,", "}}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\")", "StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics", "}}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after transform:", "Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\",", "}}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"] 
train_data.drop(columns=[\"target\"],", "}}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{", "print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\")", "def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data", "\"exang\", \"slope\", \"ca\", \"thal\"], ), ] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target)", "pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts =", "metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG trains model on synthetic data\",", "{data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data", "as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data, dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\",", "test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def", "f1_score, roc_auc_score import json from airflow.utils.dates import days_ago def _preprocess_data(): data_df = pd.read_csv(\"/opt/airflow/data/raw/{{", "inplace=True) data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, 
exist_ok=True) processed_path", "= ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ),", "\"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\",", "data_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform:", "\"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer,", "from sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import", "} pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics,", "from datetime import timedelta from airflow import DAG from airflow.operators.python import PythonOperator import", "ds }}/metrics.json\", \"w\") as metric_file: json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG trains", "transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds", "transformer = ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"],", "before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{", "'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\", \"exang\", \"slope\", \"ca\", \"thal\"], ), ] )", 
"datetime import timedelta from airflow import DAG from airflow.operators.python import PythonOperator import pandas", "= target_df print(f\"data after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{", "train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer(", "ds }}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data =", "accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates import days_ago def _preprocess_data(): data_df =", "), ] ) transformer.fit_transform(train_data) model = LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True)", "open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{", "[\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\", \"restecg\",", "print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False) def _train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds", "}}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ ( 'num', Pipeline([('scaler',", "import accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates import days_ago def _preprocess_data(): data_df", "test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds 
}}/train.csv\") target =", "import pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json from airflow.utils.dates import", "pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ (", "= pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\")", "sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import json", "import StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import pickle from", "_train_val_split(): data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/data.csv\") train_data, test_data = train_test_split(data, train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds", "index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\") target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True)", "ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), (", "dag=dag, ) train_val_split = PythonOperator( task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\",", "train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data = pd.read_csv(\"/opt/airflow/data/processed/{{", "ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\"))", "inplace=True) transformer = 
ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\",", "inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\"))", "task_id=\"split_data\", python_callable=_train_val_split, dag=dag ) train_model = PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model =", "_test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model =", "ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f) def _test_model(): test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds", "DAG trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data =", "ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with", "import DAG from airflow.operators.python import PythonOperator import pandas as pd from sklearn.compose import", "target = train_data[\"target\"] train_data.drop(columns=[\"target\"], inplace=True) transformer = ColumnTransformer( [ ( 'num', Pipeline([('scaler', StandardScaler())]),", "trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator(", "= PythonOperator( task_id=\"train_model\", python_callable=_train_model, dag=dag ) test_model = PythonOperator( task_id=\"test_model\", python_callable=_test_model, dag=dag )", "exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving processed data to {processed_path}\") data_df.to_csv(processed_path, index=False)", "train_size=0.8) train_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/train.csv\", index=False) 
test_data.to_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\", index=False) def _train_model(): train_data =", "predicts), } pathlib.Path(\"/opt/airflow/data/metrics/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/metrics/{{ ds }}/metrics.json\", \"w\") as metric_file:", "json.dump(metrics, metric_file) with DAG( dag_id=\"model_train\", description=\"This DAG trains model on synthetic data\", start_date=days_ago(0),", "from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from", "import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from", "ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"], inplace=True) data_df[\"target\"] = target_df print(f\"data after", "= pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{ ds", "transformer.transform(test_data) predicts = model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target, predicts), \"f1\": f1_score(target, predicts),", "as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"wb\") as f: pickle.dump(transformer, f)", "test_data = pd.read_csv(\"/opt/airflow/data/processed/{{ ds }}/test.csv\") target = test_data[\"target\"] test_data.drop(columns=[\"target\"], inplace=True) model = pickle.load(open(\"/opt/airflow/data/models/{{", "with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as f: pickle.dump(model, f) with open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\",", "DAG( dag_id=\"model_train\", description=\"This DAG trains model on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as", "import days_ago def _preprocess_data(): data_df = 
pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds", "pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/data.csv\") target_df = pd.read_csv(\"/opt/airflow/data/raw/{{ ds }}/target.csv\") print(f\"data before transform: {data_df}\") data_df.drop(columns=[\"fbs\"],", "after transform: {data_df}\") pathlib.Path(\"/opt/airflow/data/processed/{{ ds }}\").mkdir(parents=True, exist_ok=True) processed_path = \"/opt/airflow/data/processed/{{ ds }}/data.csv\" print(f\"saving", "= pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics = { \"accuracy\":", "from airflow.operators.python import PythonOperator import pandas as pd from sklearn.compose import ColumnTransformer from", "PythonOperator import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline", "StandardScaler())]), [\"age\", \"trestbps\", \"chol\", \"thalach\", \"oldpeak\"], ), ( 'cat', Pipeline([('onehot', OneHotEncoder(handle_unknown='ignore'))]), [\"sex\", \"cp\",", "\"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = model.predict(test_data) metrics =", "sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression import pickle from sklearn.metrics import accuracy_score,", "= pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"rb\")) transformer = pickle.load(open(\"/opt/airflow/data/models/{{ ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts", "LogisticRegression() model.fit(train_data, target) pathlib.Path(\"/opt/airflow/data/models/{{ ds }}\").mkdir(parents=True, exist_ok=True) with open(\"/opt/airflow/data/models/{{ ds }}/model.pkl\", \"wb\") as", "ds }}/transformer.pkl\", \"rb\")) transformer.transform(test_data) predicts = 
model.predict(test_data) metrics = { \"accuracy\": accuracy_score(target, predicts),", "on synthetic data\", start_date=days_ago(0), schedule_interval=timedelta(days=1), ) as dag: preprocess_data = PythonOperator( task_id=\"data_preprocessing\", python_callable=_preprocess_data," ]
[ "with open(path, 'r') as f: config = json.load(f) return config def make_config(paths): configs", "process pool and getting results as completed. Not necessarily in order. # async", "math.ceil(8/workers) due to process overhead. # # # Output: # # (1, -2)", "cpu_bound_func, list(range(8)), b=2)): # result = await _ # print(result) # # start", "List of asyncio.Futures # # Examples: # # .. code-block:: python # :linenos:", "for key, val in newconfig.items(): configs[business_driver][key] = val return configs def load_model(path: str,", "dict) -> Tuple[dict, dict]: \"\"\"values in the arb field exepected to be a", "{} newconfig = load_config(fp) for key, val in newconfig.items(): configs[business_driver][key] = val return", "not in configs: configs[business_driver] = {} newconfig = load_config(fp) for key, val in", "next_queue.task_done() # if not queue._unfinished_tasks and not next_queue._unfinished_tasks: # break pool.shutdown() # not", "#os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1] if business_driver not in configs: configs[business_driver] = {} newconfig =", "# async def exhaust_async_process_pool(): # for _ in asyncio.as_completed(async_process_pool(0, cpu_bound_func, list(range(8)), b=2)): #", "order. # async def exhaust_async_process_pool(): # for _ in asyncio.as_completed(async_process_pool(0, cpu_bound_func, list(range(8)), b=2)):", "= await _ # print(result) # # start = time.time() # asyncio.run(exhaust_async_process_pool()) #", "**value))) # iterable.task_done() # return futures async def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue, func:", "future is then put on the write_queue. If queue's requests are completed and", "each process # args: additional values passed to every process # kwargs: additional", "overhead. 
# # # Output: # # (1, -2) # (0, -2) #", "int, # func: Callable, # iterable: Union[list, tuple, asyncio.Queue]) -> List[Coroutine]: # \"\"\"", "arb: msg['arb'] = arb return msg def load_config(path: str) -> dict: \"\"\" Provide", "config def make_config(paths): configs = {} for fp in paths: business_driver = Path(fp).parent.stem", "as pool: # return [loop.run_in_executor(pool, partial(func, _ , *args, **kwargs)) for _ in", "return [loop.run_in_executor(pool, partial(func, _ , *args, **kwargs)) for _ in iterable] # #", "concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8)) # pool = concurrent.futures.ProcessPoolExecutor(1) # useful for debugging loop = asyncio.get_running_loop()", "Provide path to load the JSON config Args: path: str, should be path", "Examples: # # .. code-block:: python # :linenos: # # def cpu_bound_func(a, b=b):", "could be refactored by async_queue.worker Args: next_queue: queue containing the responses write_queue: queue", "to be a dictionary.\"\"\" if 'arb' in msg: arb = msg.pop('arb') return arb,", "# (4, -2) # (16, -2) # (25, -2) # (36, -2) #", "json.load(f) return config def make_config(paths): configs = {} for fp in paths: business_driver", "Pool # func: function # iterable: unique values you will pass to each", "func: function to use in the process pool. This is self.parse Returns: None", "\"\"\" Parses the response html in a concurrent.futures.ProcessPoolExcecutor Process Pool. This function checks", "Client configs will overwrite matching keys in the Driver config. Args: driver (dict):", "for fp in paths: business_driver = Path(fp).parent.stem #os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1] if business_driver not", "for _ in iterable] # # # submitting futures to the process pool", "Returns: Any JSON-serializable data. Usually a dict for the config files. \"\"\" with", "# # def cpu_bound_func(a, b=b): # # CPU-bound operations will block the event", "(16, -2) # (25, -2) # (36, -2) # # .. 
todo:: make", "pass to each process # args: additional values passed to every process #", "# submitting futures to the process pool and getting results as completed. Not", "finish this task func: function to use in the process pool. This is", "when to finish this task func: function to use in the process pool.", "has completed (i.e. no unfinished tasks in either queue), then break. .. todo::", "If queue's requests are completed and next_queue has completed (i.e. no unfinished tasks", "b=b): # # CPU-bound operations will block the event loop: # # in", "asyncio.get_running_loop() # with concurrent.futures.ProcessPoolExecutor(workers) as pool: # return [loop.run_in_executor(pool, partial(func, _ , *args,", "# # Args: # workers: Number of workers in the Process Pool #", "partial(func, **value)) for value in iterable] # elif isinstance(iterable, asyncio.Queue): # # todo", "write_queue.put(futs) func = None # futures.append(loop.run_in_executor(pool, partial(func, **value))) next_queue.task_done() # if not queue._unfinished_tasks", "\"\"\" Merge Driver and Client config. The Client configs will overwrite matching keys", "additional values passed to every process # kwargs: additional values passed to every", "a future. The future is then put on the write_queue. If queue's requests", "-2) # (0, -2) # (9, -2) # (4, -2) # (16, -2)", "use in the process pool. This is self.parse Returns: None \"\"\" pool =", "# result = await _ # print(result) # # start = time.time() #", "str, mode: str = 'rb', response_encoding=None): with open(path, mode) as f: model =", "import asyncio import concurrent.futures from functools import partial from datetime import datetime, timedelta", "# # Output: # # (1, -2) # (0, -2) # (9, -2)", "the response html in a concurrent.futures.ProcessPoolExcecutor Process Pool. This function checks if next_queue", ".. code-block:: python # :linenos: # # def cpu_bound_func(a, b=b): # # CPU-bound", "longer than math.ceil(8/workers) due to process overhead. 
def set_arb(msg: dict, arb: dict) -> dict:
    """Attach *arb* under the 'arb' key of *msg* when it is non-empty.

    Args:
        msg (dict): message to augment (mutated in place).
        arb (dict): arbitration data; falsy values (e.g. ``{}``) are ignored.

    Returns:
        The same *msg* object, possibly with an 'arb' entry added.
    """
    # Guard clause: nothing to attach.
    if not arb:
        return msg
    msg['arb'] = arb
    return msg
def load_config(path: str) -> dict:
    """Load a JSON config file from *path*.

    Args:
        path: str, path to a JSON file.

    Returns:
        Any JSON-serializable data. Usually a dict for the config files.
    """
    with open(path, 'r') as fh:
        return json.load(fh)
def make_config(paths):
    """Build per-driver configs by merging every config file for each driver.

    The driver name is taken from each file's parent directory name; files
    appearing later in *paths* overwrite matching keys from earlier files
    belonging to the same driver.

    Args:
        paths: iterable of config-file paths.

    Returns:
        dict: mapping of business-driver name -> merged config dict.
    """
    configs = {}
    for fp in paths:
        business_driver = Path(fp).parent.stem
        # dict.update replaces the manual key-by-key copy loop; setdefault
        # replaces the explicit membership check.
        configs.setdefault(business_driver, {}).update(load_config(fp))
    return configs
def load_model(path: str, mode: str = 'rb', response_encoding=None):
    """Deserialize a pickled model from *path*.

    SECURITY NOTE: ``pickle.load`` can execute arbitrary code; only load
    files from trusted sources.

    Args:
        path (str): path to the pickle file.
        mode (str): file open mode, 'rb' by default.
        response_encoding: accepted for interface compatibility but
            currently unused by this function.

    Returns:
        The unpickled object.
    """
    with open(path, mode) as fh:
        return pickle.load(fh)
def merge_configs(driver: dict, client: dict) -> dict:
    """Merge Driver and Client config.

    Keys present in *client* overwrite matching keys from *driver*.
    Neither input dict is mutated; a new dict is returned.

    Args:
        driver (dict): driver dictionary of configs
        client (dict): client dictionary of configs

    Returns:
        Merged configs (dict)
    """
    merged = dict(driver)
    merged.update(client)
    return merged
async def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue, func: Optional[Callable] = None):
    """
    Parses the response html in a concurrent.futures.ProcessPoolExecutor process pool.

    This coroutine polls next_queue. When it is not empty, it gets each item,
    submits it to the process pool via ``loop.run_in_executor`` and puts the
    resulting future on write_queue.

    .. todo:: this could be refactored by async_queue.worker

    Args:
        next_queue: queue containing the responses
        write_queue: queue to put the asyncio.Futures on
        func: function to use in the process pool (e.g. self.parse). When not
            given, each queue item is expected to carry its own 'parse_func'.

    Returns:
        None

    .. note:: (review) the docstring previously described a ``queue`` argument
        ("queue containing the requests to be made") that is not a parameter
        of this function — the break condition that used it is commented out
        below, so this loop currently never terminates.
    """
    # NOTE(review): max(cpu_count(), 8) yields AT LEAST 8 workers even on
    # small machines — confirm whether min(cpu_count(), 8) (a cap) was intended.
    pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8))
    # pool = concurrent.futures.ProcessPoolExecutor(1)  # useful for debugging
    loop = asyncio.get_running_loop()
    while True:
        # Yield control between polls; ASYNC_SLEEP comes from configs.base.consts.
        await asyncio.sleep(consts.ASYNC_SLEEP)
        if not next_queue.empty():
            value = await next_queue.get()
            # If no parser was supplied, take it from the message itself
            # ('parse_func' must not be forwarded as a kwarg to the pool).
            if not func:
                func = value.pop('parse_func')
            # Run the CPU-bound parse in the process pool; the remaining
            # message fields become keyword arguments of func.
            futs = loop.run_in_executor(pool, partial(func, **value))
            await write_queue.put(futs)
            # NOTE(review): resetting func forces every subsequent item to
            # carry 'parse_func' — a caller-supplied func is discarded after
            # the first item. Confirm this is intentional.
            func = None
            # futures.append(loop.run_in_executor(pool, partial(func, **value)))
            next_queue.task_done()
        # if not queue._unfinished_tasks and not next_queue._unfinished_tasks:
        #     break
    # NOTE(review): unreachable — the break above is commented out, so the
    # while-loop never exits and the pool is never shut down here.
    pool.shutdown()  # not very useful...
Not necessarily in", "args: additional values passed to every process # kwargs: additional values passed to", "in asyncio.as_completed(async_process_pool(0, cpu_bound_func, list(range(8)), b=2)): # result = await _ # print(result) #", "multiprocessing import cpu_count import pickle from deprecated.sphinx import deprecated from configs.base import consts", "= logger.rabbit_logger(__name__) def pop_arb_field_if_exists(msg: dict) -> Tuple[dict, dict]: \"\"\"values in the arb field", "= load_config(fp) for key, val in newconfig.items(): configs[business_driver][key] = val return configs def", "run them in a # # process pool. Simulating this. with arg and", "function # iterable: unique values you will pass to each process # args:", "correctly... # \"\"\" # if workers <= 0: # workers = cpu_count() #", "b*-1 # # def async_process_pool(workers: int, func: Callable, iterable, *args, **kwargs) -> list:", "\"\"\" pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8)) # pool = concurrent.futures.ProcessPoolExecutor(1) # useful for debugging", "= asyncio.get_running_loop() # with concurrent.futures.ProcessPoolExecutor(workers) as pool: # if isinstance(iterable, (list, tuple)): #", "function checks if next_queue is empty. If it is not, then it empties", "futures.append(loop.run_in_executor(pool, partial(func, **value))) next_queue.task_done() # if not queue._unfinished_tasks and not next_queue._unfinished_tasks: # break", "arb, msg return {}, msg def set_arb(msg: dict, arb: dict) -> dict: if", "config. The Client configs will overwrite matching keys in the Driver config. Args:", "print(end) # should take a littler longer than math.ceil(8/workers) due to process overhead.", "# (25, -2) # (36, -2) # # .. todo:: make this work", "futures. # # Args: # workers: Number of workers in the Process Pool", "datetime import datetime, timedelta from typing import Union, List, Coroutine, Callable, Tuple, Optional", "# (36, -2) # # .. 
todo:: make this work with async queues", "is used to know when to finish this task func: function to use", "be a dictionary.\"\"\" if 'arb' in msg: arb = msg.pop('arb') return arb, msg", "python # :linenos: # # def cpu_bound_func(a, b=b): # # CPU-bound operations will", "start = time.time() # asyncio.run(exhaust_async_process_pool()) # end = time.time() - start # print(end)", "from datetime import datetime, timedelta from typing import Union, List, Coroutine, Callable, Tuple,", "logger.rabbit_logger(__name__) def pop_arb_field_if_exists(msg: dict) -> Tuple[dict, dict]: \"\"\"values in the arb field exepected", "arb = msg.pop('arb') return arb, msg return {}, msg def set_arb(msg: dict, arb:", "file Returns: Any JSON-serializable data. Usually a dict for the config files. \"\"\"", "= asyncio.get_running_loop() # with concurrent.futures.ProcessPoolExecutor(workers) as pool: # return [loop.run_in_executor(pool, partial(func, _ ,", "is preferable to run them in a # # process pool. Simulating this.", "a # # process pool. Simulating this. with arg and kwarg. # time.sleep(1)", "import Union, List, Coroutine, Callable, Tuple, Optional from multiprocessing import cpu_count import pickle", "queue to put the list of asyncio.Futures on queue: queue containing the requests", "func: Optional[Callable] = None): \"\"\" Parses the response html in a concurrent.futures.ProcessPoolExcecutor Process", "response_encoding=None): with open(path, mode) as f: model = pickle.load(f) return model def merge_configs(driver:", "**kwargs) -> list: # if workers <= 0: # workers = cpu_count() #", "config Args: path: str, should be path to JSON file Returns: Any JSON-serializable", "Usually a dict for the config files. \"\"\" with open(path, 'r') as f:", "This function checks if next_queue is empty. 
If it is not, then it", "None # futures.append(loop.run_in_executor(pool, partial(func, **value))) next_queue.task_done() # if not queue._unfinished_tasks and not next_queue._unfinished_tasks:", "# def cpu_bound_func(a, b=b): # # CPU-bound operations will block the event loop:", "-2) # (9, -2) # (4, -2) # (16, -2) # (25, -2)", "concurrent.futures from functools import partial from datetime import datetime, timedelta from typing import", "client: dict) -> dict: \"\"\" Merge Driver and Client config. The Client configs", "an iterable to a process pool and return a list of asyncio futures.", "deprecated from configs.base import consts from core import logger basiclogger = logger.rabbit_logger(__name__) def", "in general it is preferable to run them in a # # process", "know when to finish this task func: function to use in the process", "return configs def load_model(path: str, mode: str = 'rb', response_encoding=None): with open(path, mode)", "from multiprocessing import cpu_count import pickle from deprecated.sphinx import deprecated from configs.base import", "be refactored by async_queue.worker Args: next_queue: queue containing the responses write_queue: queue to", "queue: queue containing the requests to be made. 
It is used to know", "= iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done() # return futures async def", "def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue, func: Optional[Callable] = None): \"\"\" Parses the response", "Args: driver (dict): driver dictionary of configs client (dict): client dictionary of configs", "# useful for debugging loop = asyncio.get_running_loop() while True: await asyncio.sleep(consts.ASYNC_SLEEP) if not", "todo make this work # futures = [] # for ctr in range(iterable.qsize()):", "(dict): client dictionary of configs Returns: Merged configs (dict) \"\"\" return {**driver, **client}", "process # # Returns: # List of asyncio.Futures # # Examples: # #", "kwarg. # time.sleep(1) # return a**2, b*-1 # # def async_process_pool(workers: int, func:", "Pool. This function checks if next_queue is empty. If it is not, then", "configs[business_driver] = {} newconfig = load_config(fp) for key, val in newconfig.items(): configs[business_driver][key] =", "necessarily in order. # async def exhaust_async_process_pool(): # for _ in asyncio.as_completed(async_process_pool(0, cpu_bound_func,", "as pool: # if isinstance(iterable, (list, tuple)): # return [loop.run_in_executor(pool, partial(func, **value)) for", "to load the JSON config Args: path: str, should be path to JSON", "# iterable: Union[list, tuple, asyncio.Queue]) -> List[Coroutine]: # \"\"\" # Pass an iterable", "Simulating this. with arg and kwarg. # time.sleep(1) # return a**2, b*-1 #", "Callable, iterable, *args, **kwargs) -> list: # if workers <= 0: # workers", "task func: function to use in the process pool. This is self.parse Returns:", "general it is preferable to run them in a # # process pool.", "= time.time() - start # print(end) # should take a littler longer than", "next_queue is empty. 
If it is not, then it empties it by getting", "set_arb(msg: dict, arb: dict) -> dict: if arb: msg['arb'] = arb return msg", "in the process pool. This is self.parse Returns: None \"\"\" pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(),", "load_config(fp) for key, val in newconfig.items(): configs[business_driver][key] = val return configs def load_model(path:", "this work with async queues correctly... # \"\"\" # if workers <= 0:", "with async queues correctly... # \"\"\" # if workers <= 0: # workers", "partial(func, _ , *args, **kwargs)) for _ in iterable] # # # submitting", "Union, List, Coroutine, Callable, Tuple, Optional from multiprocessing import cpu_count import pickle from", "in either queue), then break. .. todo:: this could be refactored by async_queue.worker", "are completed and next_queue has completed (i.e. no unfinished tasks in either queue),", "put the list of asyncio.Futures on queue: queue containing the requests to be", "of asyncio.Futures on queue: queue containing the requests to be made. It is", "submitting futures to the process pool and getting results as completed. Not necessarily", "import shutil import json import asyncio import concurrent.futures from functools import partial from", "# elif isinstance(iterable, asyncio.Queue): # # todo make this work # futures =", "it is preferable to run them in a # # process pool. Simulating", "If it is not, then it empties it by getting each item in", "fp in paths: business_driver = Path(fp).parent.stem #os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1] if business_driver not in", "dict) -> dict: \"\"\" Merge Driver and Client config. The Client configs will", "next_queue and passing to the Process Pool and returing a future. 
The future", "in msg: arb = msg.pop('arb') return arb, msg return {}, msg def set_arb(msg:", "partial(func, **value))) # iterable.task_done() # return futures async def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue,", "on queue: queue containing the requests to be made. It is used to", "in the Driver config. Args: driver (dict): driver dictionary of configs client (dict):", "in a concurrent.futures.ProcessPoolExcecutor Process Pool. This function checks if next_queue is empty. If", "empty. If it is not, then it empties it by getting each item", "and returing a future. The future is then put on the write_queue. If", "(36, -2) # # .. todo:: make this work with async queues correctly...", "func: function # iterable: unique values you will pass to each process #", "return msg def load_config(path: str) -> dict: \"\"\" Provide path to load the", "configs[business_driver][key] = val return configs def load_model(path: str, mode: str = 'rb', response_encoding=None):", "make this work with async queues correctly... # \"\"\" # if workers <=", "concurrent.futures.ProcessPoolExecutor(1) # useful for debugging loop = asyncio.get_running_loop() while True: await asyncio.sleep(consts.ASYNC_SLEEP) if", "key, val in newconfig.items(): configs[business_driver][key] = val return configs def load_model(path: str, mode:", "tuple)): # return [loop.run_in_executor(pool, partial(func, **value)) for value in iterable] # elif isinstance(iterable,", "b=2)): # result = await _ # print(result) # # start = time.time()", "asyncio import concurrent.futures from functools import partial from datetime import datetime, timedelta from", "Any JSON-serializable data. Usually a dict for the config files. \"\"\" with open(path,", "# process pool. Simulating this. with arg and kwarg. 
# time.sleep(1) # return", "[] # for ctr in range(iterable.qsize()): # value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func,", "import deprecated from configs.base import consts from core import logger basiclogger = logger.rabbit_logger(__name__)", "will pass to each process # args: additional values passed to every process", "# # in general it is preferable to run them in a #", "Path(fp).parent.stem #os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1] if business_driver not in configs: configs[business_driver] = {} newconfig", "queue containing the responses write_queue: queue to put the list of asyncio.Futures on", "to each process # args: additional values passed to every process # kwargs:", "values passed to every process # # Returns: # List of asyncio.Futures #", "tasks in either queue), then break. .. todo:: this could be refactored by", "# workers = cpu_count() # loop = asyncio.get_running_loop() # with concurrent.futures.ProcessPoolExecutor(workers) as pool:", "process pool and return a list of asyncio futures. # # Args: #", "process # args: additional values passed to every process # kwargs: additional values", "to be made. It is used to know when to finish this task", "be made. It is used to know when to finish this task func:", "loop.run_in_executor(pool, partial(func, **value)) await write_queue.put(futs) func = None # futures.append(loop.run_in_executor(pool, partial(func, **value))) next_queue.task_done()", "with concurrent.futures.ProcessPoolExecutor(workers) as pool: # if isinstance(iterable, (list, tuple)): # return [loop.run_in_executor(pool, partial(func,", "process overhead. 
# # # Output: # # (1, -2) # (0, -2)", "not next_queue.empty(): value = await next_queue.get() if not func: func = value.pop('parse_func') futs", "configs Returns: Merged configs (dict) \"\"\" return {**driver, **client} # def process_pool(workers: int,", "arb return msg def load_config(path: str) -> dict: \"\"\" Provide path to load", "# with concurrent.futures.ProcessPoolExecutor(workers) as pool: # if isinstance(iterable, (list, tuple)): # return [loop.run_in_executor(pool,", "# value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done() # return futures", "# Output: # # (1, -2) # (0, -2) # (9, -2) #", "not func: func = value.pop('parse_func') futs = loop.run_in_executor(pool, partial(func, **value)) await write_queue.put(futs) func", "import concurrent.futures from functools import partial from datetime import datetime, timedelta from typing", "configs: configs[business_driver] = {} newconfig = load_config(fp) for key, val in newconfig.items(): configs[business_driver][key]", "business_driver not in configs: configs[business_driver] = {} newconfig = load_config(fp) for key, val", "= {} newconfig = load_config(fp) for key, val in newconfig.items(): configs[business_driver][key] = val", "Process Pool and returing a future. The future is then put on the", "func = None # futures.append(loop.run_in_executor(pool, partial(func, **value))) next_queue.task_done() # if not queue._unfinished_tasks and", "load the JSON config Args: path: str, should be path to JSON file", "then put on the write_queue. If queue's requests are completed and next_queue has", "Process Pool. This function checks if next_queue is empty. 
If it is not,", "<gh_stars>1-10 import os from pathlib import Path import shutil import json import asyncio", "open(path, mode) as f: model = pickle.load(f) return model def merge_configs(driver: dict, client:", "return [loop.run_in_executor(pool, partial(func, **value)) for value in iterable] # elif isinstance(iterable, asyncio.Queue): #", "of workers in the Process Pool # func: function # iterable: unique values", "is not, then it empties it by getting each item in next_queue and", "exepected to be a dictionary.\"\"\" if 'arb' in msg: arb = msg.pop('arb') return", "func: Callable, # iterable: Union[list, tuple, asyncio.Queue]) -> List[Coroutine]: # \"\"\" # Pass", "for debugging loop = asyncio.get_running_loop() while True: await asyncio.sleep(consts.ASYNC_SLEEP) if not next_queue.empty(): value", "with arg and kwarg. # time.sleep(1) # return a**2, b*-1 # # def", "every process # kwargs: additional values passed to every process # # Returns:", "empties it by getting each item in next_queue and passing to the Process", "-2) # (25, -2) # (36, -2) # # .. todo:: make this", "async queues correctly... # \"\"\" # if workers <= 0: # workers =", "queue containing the requests to be made. It is used to know when", "# return a**2, b*-1 # # def async_process_pool(workers: int, func: Callable, iterable, *args,", "None \"\"\" pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8)) # pool = concurrent.futures.ProcessPoolExecutor(1) # useful for", "arg and kwarg. # time.sleep(1) # return a**2, b*-1 # # def async_process_pool(workers:", "futures = [] # for ctr in range(iterable.qsize()): # value = iterable.get_nowait() #", "= val return configs def load_model(path: str, mode: str = 'rb', response_encoding=None): with", "from typing import Union, List, Coroutine, Callable, Tuple, Optional from multiprocessing import cpu_count", "arb: dict) -> dict: if arb: msg['arb'] = arb return msg def load_config(path:", "JSON file Returns: Any JSON-serializable data. 
Usually a dict for the config files.", "config. Args: driver (dict): driver dictionary of configs client (dict): client dictionary of", "_ # print(result) # # start = time.time() # asyncio.run(exhaust_async_process_pool()) # end =", "passed to every process # # Returns: # List of asyncio.Futures # #", "# end = time.time() - start # print(end) # should take a littler", "Args: next_queue: queue containing the responses write_queue: queue to put the list of", "True: await asyncio.sleep(consts.ASYNC_SLEEP) if not next_queue.empty(): value = await next_queue.get() if not func:", "keys in the Driver config. Args: driver (dict): driver dictionary of configs client", "Returns: # List of asyncio.Futures # # Examples: # # .. code-block:: python", "to know when to finish this task func: function to use in the", "in the arb field exepected to be a dictionary.\"\"\" if 'arb' in msg:", "= arb return msg def load_config(path: str) -> dict: \"\"\" Provide path to", "# \"\"\" # Pass an iterable to a process pool and return a", "(9, -2) # (4, -2) # (16, -2) # (25, -2) # (36,", "the event loop: # # in general it is preferable to run them", "useful for debugging loop = asyncio.get_running_loop() while True: await asyncio.sleep(consts.ASYNC_SLEEP) if not next_queue.empty():", "pool and getting results as completed. Not necessarily in order. # async def", "deprecated.sphinx import deprecated from configs.base import consts from core import logger basiclogger =", "pool and return a list of asyncio futures. 
# # Args: # workers:", "model = pickle.load(f) return model def merge_configs(driver: dict, client: dict) -> dict: \"\"\"", "# pool = concurrent.futures.ProcessPoolExecutor(1) # useful for debugging loop = asyncio.get_running_loop() while True:", "futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done() # return futures async def parse_consumer(next_queue: asyncio.Queue, write_queue:", "self.parse Returns: None \"\"\" pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8)) # pool = concurrent.futures.ProcessPoolExecutor(1) #", "def process_pool(workers: int, # func: Callable, # iterable: Union[list, tuple, asyncio.Queue]) -> List[Coroutine]:", "a dict for the config files. \"\"\" with open(path, 'r') as f: config", "field exepected to be a dictionary.\"\"\" if 'arb' in msg: arb = msg.pop('arb')", "msg: arb = msg.pop('arb') return arb, msg return {}, msg def set_arb(msg: dict,", "path: str, should be path to JSON file Returns: Any JSON-serializable data. Usually", "as f: model = pickle.load(f) return model def merge_configs(driver: dict, client: dict) ->", "the write_queue. If queue's requests are completed and next_queue has completed (i.e. no", "= value.pop('parse_func') futs = loop.run_in_executor(pool, partial(func, **value)) await write_queue.put(futs) func = None #", "# kwargs: additional values passed to every process # # Returns: # List", "msg['arb'] = arb return msg def load_config(path: str) -> dict: \"\"\" Provide path", "in range(iterable.qsize()): # value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done() #", "for the config files. 
\"\"\" with open(path, 'r') as f: config = json.load(f)", "partial from datetime import datetime, timedelta from typing import Union, List, Coroutine, Callable,", "for ctr in range(iterable.qsize()): # value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) #", "unfinished tasks in either queue), then break. .. todo:: this could be refactored", "if isinstance(iterable, (list, tuple)): # return [loop.run_in_executor(pool, partial(func, **value)) for value in iterable]", "- start # print(end) # should take a littler longer than math.ceil(8/workers) due", "queue), then break. .. todo:: this could be refactored by async_queue.worker Args: next_queue:", "# (1, -2) # (0, -2) # (9, -2) # (4, -2) #", "the process pool. This is self.parse Returns: None \"\"\" pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8))", "Process Pool # func: function # iterable: unique values you will pass to", "Union[list, tuple, asyncio.Queue]) -> List[Coroutine]: # \"\"\" # Pass an iterable to a", "dict for the config files. \"\"\" with open(path, 'r') as f: config =", "configs client (dict): client dictionary of configs Returns: Merged configs (dict) \"\"\" return", "operations will block the event loop: # # in general it is preferable", "item in next_queue and passing to the Process Pool and returing a future.", "pool. Simulating this. with arg and kwarg. # time.sleep(1) # return a**2, b*-1", "return a list of asyncio futures. # # Args: # workers: Number of", "**value)) for value in iterable] # elif isinstance(iterable, asyncio.Queue): # # todo make", "This is self.parse Returns: None \"\"\" pool = concurrent.futures.ProcessPoolExecutor(max(cpu_count(), 8)) # pool =", "next_queue.get() if not func: func = value.pop('parse_func') futs = loop.run_in_executor(pool, partial(func, **value)) await", ".. 
todo:: this could be refactored by async_queue.worker Args: next_queue: queue containing the", "(0, -2) # (9, -2) # (4, -2) # (16, -2) # (25,", "then break. .. todo:: this could be refactored by async_queue.worker Args: next_queue: queue", "data. Usually a dict for the config files. \"\"\" with open(path, 'r') as", "and next_queue has completed (i.e. no unfinished tasks in either queue), then break.", "iterable: Union[list, tuple, asyncio.Queue]) -> List[Coroutine]: # \"\"\" # Pass an iterable to", "The Client configs will overwrite matching keys in the Driver config. Args: driver", "dict) -> dict: if arb: msg['arb'] = arb return msg def load_config(path: str)", "JSON config Args: path: str, should be path to JSON file Returns: Any", "\"\"\"values in the arb field exepected to be a dictionary.\"\"\" if 'arb' in", "consts from core import logger basiclogger = logger.rabbit_logger(__name__) def pop_arb_field_if_exists(msg: dict) -> Tuple[dict,", "in newconfig.items(): configs[business_driver][key] = val return configs def load_model(path: str, mode: str =", "2)[1] if business_driver not in configs: configs[business_driver] = {} newconfig = load_config(fp) for", "process pool. Simulating this. with arg and kwarg. # time.sleep(1) # return a**2,", "= None # futures.append(loop.run_in_executor(pool, partial(func, **value))) next_queue.task_done() # if not queue._unfinished_tasks and not", "next_queue has completed (i.e. no unfinished tasks in either queue), then break. ..", "future. The future is then put on the write_queue. 
If queue's requests are", "if not func: func = value.pop('parse_func') futs = loop.run_in_executor(pool, partial(func, **value)) await write_queue.put(futs)", "by async_queue.worker Args: next_queue: queue containing the responses write_queue: queue to put the", "Path import shutil import json import asyncio import concurrent.futures from functools import partial", "end = time.time() - start # print(end) # should take a littler longer", "iterable] # elif isinstance(iterable, asyncio.Queue): # # todo make this work # futures", "ctr in range(iterable.qsize()): # value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done()", "# CPU-bound operations will block the event loop: # # in general it", "the responses write_queue: queue to put the list of asyncio.Futures on queue: queue", "merge_configs(driver: dict, client: dict) -> dict: \"\"\" Merge Driver and Client config. The", "= cpu_count() # loop = asyncio.get_running_loop() # with concurrent.futures.ProcessPoolExecutor(workers) as pool: # if", "[loop.run_in_executor(pool, partial(func, **value)) for value in iterable] # elif isinstance(iterable, asyncio.Queue): # #", "val return configs def load_model(path: str, mode: str = 'rb', response_encoding=None): with open(path,", "import partial from datetime import datetime, timedelta from typing import Union, List, Coroutine,", "the Driver config. 
Args: driver (dict): driver dictionary of configs client (dict): client", "this work # futures = [] # for ctr in range(iterable.qsize()): # value", "# if isinstance(iterable, (list, tuple)): # return [loop.run_in_executor(pool, partial(func, **value)) for value in", "a**2, b*-1 # # def async_process_pool(workers: int, func: Callable, iterable, *args, **kwargs) ->", "range(iterable.qsize()): # value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done() # return", "cpu_count import pickle from deprecated.sphinx import deprecated from configs.base import consts from core", "if arb: msg['arb'] = arb return msg def load_config(path: str) -> dict: \"\"\"", "elif isinstance(iterable, asyncio.Queue): # # todo make this work # futures = []", "completed. Not necessarily in order. # async def exhaust_async_process_pool(): # for _ in", "# workers: Number of workers in the Process Pool # func: function #", "refactored by async_queue.worker Args: next_queue: queue containing the responses write_queue: queue to put", "# # .. todo:: make this work with async queues correctly... # \"\"\"", "and passing to the Process Pool and returing a future. The future is", "= asyncio.get_running_loop() while True: await asyncio.sleep(consts.ASYNC_SLEEP) if not next_queue.empty(): value = await next_queue.get()", "a littler longer than math.ceil(8/workers) due to process overhead. 
# # # Output:", "await _ # print(result) # # start = time.time() # asyncio.run(exhaust_async_process_pool()) # end", "List, Coroutine, Callable, Tuple, Optional from multiprocessing import cpu_count import pickle from deprecated.sphinx", "async def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue, func: Optional[Callable] = None): \"\"\" Parses the", "process # kwargs: additional values passed to every process # # Returns: #", "mode: str = 'rb', response_encoding=None): with open(path, mode) as f: model = pickle.load(f)", "(list, tuple)): # return [loop.run_in_executor(pool, partial(func, **value)) for value in iterable] # elif", "import pickle from deprecated.sphinx import deprecated from configs.base import consts from core import", "path to load the JSON config Args: path: str, should be path to", "take a littler longer than math.ceil(8/workers) due to process overhead. # # #", "requests are completed and next_queue has completed (i.e. no unfinished tasks in either", "# if workers <= 0: # workers = cpu_count() # loop = asyncio.get_running_loop()", "event loop: # # in general it is preferable to run them in", "{} for fp in paths: business_driver = Path(fp).parent.stem #os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1] if business_driver", "# asyncio.run(exhaust_async_process_pool()) # end = time.time() - start # print(end) # should take", "to every process # kwargs: additional values passed to every process # #", "workers: Number of workers in the Process Pool # func: function # iterable:", "basiclogger = logger.rabbit_logger(__name__) def pop_arb_field_if_exists(msg: dict) -> Tuple[dict, dict]: \"\"\"values in the arb", "from deprecated.sphinx import deprecated from configs.base import consts from core import logger basiclogger", "newconfig = load_config(fp) for key, val in newconfig.items(): configs[business_driver][key] = val return configs", "time.sleep(1) # return a**2, b*-1 # # def async_process_pool(workers: int, func: Callable, iterable,", 
"logger basiclogger = logger.rabbit_logger(__name__) def pop_arb_field_if_exists(msg: dict) -> Tuple[dict, dict]: \"\"\"values in the", "be path to JSON file Returns: Any JSON-serializable data. Usually a dict for", "(4, -2) # (16, -2) # (25, -2) # (36, -2) # #", "= [] # for ctr in range(iterable.qsize()): # value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool,", "model def merge_configs(driver: dict, client: dict) -> dict: \"\"\" Merge Driver and Client", "val in newconfig.items(): configs[business_driver][key] = val return configs def load_model(path: str, mode: str", "# func: function # iterable: unique values you will pass to each process", "'r') as f: config = json.load(f) return config def make_config(paths): configs = {}", "getting results as completed. Not necessarily in order. # async def exhaust_async_process_pool(): #", "completed and next_queue has completed (i.e. no unfinished tasks in either queue), then", "await asyncio.sleep(consts.ASYNC_SLEEP) if not next_queue.empty(): value = await next_queue.get() if not func: func", "# args: additional values passed to every process # kwargs: additional values passed", "None): \"\"\" Parses the response html in a concurrent.futures.ProcessPoolExcecutor Process Pool. This function", "value = iterable.get_nowait() # futures.append(loop.run_in_executor(pool, partial(func, **value))) # iterable.task_done() # return futures async", "and getting results as completed. Not necessarily in order. # async def exhaust_async_process_pool():", "Parses the response html in a concurrent.futures.ProcessPoolExcecutor Process Pool. This function checks if", "# return futures async def parse_consumer(next_queue: asyncio.Queue, write_queue: asyncio.Queue, func: Optional[Callable] = None):", "= {} for fp in paths: business_driver = Path(fp).parent.stem #os.path.split(fp[0])[1] #fp.rsplit('/', 2)[1] if" ]
[ "id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid", "else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line in process.stdout: print(line, end='') process.wait()", "https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line", "subprocess from celery import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'),", "app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id", "broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import config", "{0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues with code", "subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues with code outputing malformed unicode", "subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues with code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers", "task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import config celery_config = config.get('runners', {}).get('celery',", "def start(self, job, cwd=None): # 
https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe", "from celery import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle',", "outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe)", "config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info", "celery import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'],", "'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import config celery_config = config.get('runners',", "start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe =", "issues with code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else", "cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line in process.stdout: print(line, end='')", "import subprocess from celery import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), 
result_backend=os.environ.get('CELERY_RESULT_BACKEND',", "encoding='utf-8', # Avoid issues with code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd", "celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None):", "enable_utc=True, ) from qaboard.config import config celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True,", "result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import config celery_config =", "\"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request))", "print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8',", "<reponame>Samsung/qaboard import os import subprocess from celery import Celery app = Celery('celery_app') app.conf.update(", "{}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing", "groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues with", "# https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command,", 
"pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues with code outputing", "stdout=pipe, stderr=pipe) as process: for line in process.stdout: print(line, end='') process.wait() return process.returncode", "@app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id},", "os import subprocess from celery import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'),", "with code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'],", "shell=True, encoding='utf-8', # Avoid issues with code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape',", "code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe,", "'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import config celery_config", "import config celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self,", "Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True,", "Avoid issues with code 
outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd", "accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import config celery_config = config.get('runners', {}).get('celery', {})", "import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle',", "app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config import", "if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line in process.stdout: print(line,", "= config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): #", "job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE", "cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with", ") from qaboard.config import config celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name',", "# Avoid issues with code outputing malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if", "with 
subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues with code outputing malformed unicode #", "malformed unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as", "unicode # https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process:", "job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line in process.stdout: print(line, end='') process.wait() return", "name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID:", "https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True,", "cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line in process.stdout:", "result_serializer='pickle', enable_utc=True, ) from qaboard.config import config celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config)", "from qaboard.config import config celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\"))", "{}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job, cwd=None): # https://docs.celeryproject.org/en/stable/userguide/tasks.html#task-request-info print('Executing task", "= subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues 
with code outputing malformed", "Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from qaboard.config", "app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, )", "errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for line in", "{0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', # Avoid issues", "= Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL', 'pyamqp://guest@localhost//'), result_backend=os.environ.get('CELERY_RESULT_BACKEND', 'rpc://'), task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', enable_utc=True, ) from", "# https://docs.python.org/3/library/codecs.html#error-handlers errors='surrogateescape', cwd=cwd if cwd else job.run_context.job_options['cwd'], stdout=pipe, stderr=pipe) as process: for", "task id {0.id}, groupID: {0.group}'.format(self.request)) pipe = subprocess.PIPE with subprocess.Popen(job.run_context.command, shell=True, encoding='utf-8', #", "config celery_config = config.get('runners', {}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def start(self, job,", "import os import subprocess from celery import Celery app = Celery('celery_app') app.conf.update( broker_url=os.environ.get('CELERY_BROKER_URL',", "qaboard.config import config celery_config = config.get('runners', 
{}).get('celery', {}) app.conf.update(**celery_config) @app.task(bind=True, name=celery_config.get('qaboard_task_name', \"qaboard\")) def" ]
[]
[ "= tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo", "INPUT_FILE_NAME = 'input.txt' def parseSpotInput(text): tmp_split = text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1]", "velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')])", "def parseSpotInput(text): tmp_split = text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord)", "tmp_split[1] pos = tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo)", "current_second = 0 while True: tick(spots) current_second += 1 min_y = min(spots, key", "= tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo", "max_x = max(spots, key = lambda s: s[0][0])[0][0] min_y = min(spots, key =", "hasSpotAt(x, y, spots) else ' ' for x in range(min_x, max_x + 1)]))", "spot in spots: spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1] + dir", "max(spots, key = lambda s: s[0][1])[0][1] for y in range(min_y, max_y + 1):", "min(spots, key = lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda s:", "lambda s: s[0][1])[0][1] min_delta_y = max_y - min_y if current_min_delta_y == None or", "hasSpotAt(x, y, spots): for spot in spots: if spot[0] == (x,y): return True", "input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds = searchAndPrint(spots)", "current_second += 1 min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y =", "min(spots, key = lambda s: s[0][0])[0][0] max_x = max(spots, key = lambda s:", "- 1 spots = [] with open(INPUT_FILE_NAME) as input_file: for line in input_file:", "= (spot[0][0] + dir * spot[1][0], spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots):", "= lambda s: s[0][0])[0][0] min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y", "lambda s: 
s[0][0])[0][0] min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y =", "return [pos, velo] def hasSpotAt(x, y, spots): for spot in spots: if spot[0]", "s: s[0][1])[0][1] for y in range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x, y,", "True return False def printSpots(spots): min_x = min(spots, key = lambda s: s[0][0])[0][0]", "for spot in spots: spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1] +", "min_x = min(spots, key = lambda s: s[0][0])[0][0] max_x = max(spots, key =", "s[0][0])[0][0] max_x = max(spots, key = lambda s: s[0][0])[0][0] min_y = min(spots, key", "velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x, y, spots): for", "spot[1][0], spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second =", "s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] min_delta_y = max_y", "= [] with open(INPUT_FILE_NAME) as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to", "if spot[0] == (x,y): return True return False def printSpots(spots): min_x = min(spots,", "current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots) return current_second - 1 spots", "', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos,", "spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1] + dir * spot[1][1]) def", "s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] for y in range(min_y,", "= lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] min_delta_y", "def hasSpotAt(x, y, spots): for spot in spots: if spot[0] == (x,y): return", "' ' for x in range(min_x, max_x + 1)])) def tick(spots, dir=1): for", "coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in velo_str.replace(' ',", "in input_file: 
spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds = searchAndPrint(spots) print('Solution to part", "y, spots) else ' ' for x in range(min_x, max_x + 1)])) def", "= 'input.txt' def parseSpotInput(text): tmp_split = text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos", "= max(spots, key = lambda s: s[0][1])[0][1] for y in range(min_y, max_y +", "1): print(''.join(['#' if hasSpotAt(x, y, spots) else ' ' for x in range(min_x,", "range(min_x, max_x + 1)])) def tick(spots, dir=1): for spot in spots: spot[0] =", "spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds = searchAndPrint(spots) print('Solution to part 2: %i'", "tick(spots) current_second += 1 min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y", "1 spots = [] with open(INPUT_FILE_NAME) as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip()))", "spot[0] == (x,y): return True return False def printSpots(spots): min_x = min(spots, key", "s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] for y in", "* spot[1][0], spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second", "printSpots(spots) return current_second - 1 spots = [] with open(INPUT_FILE_NAME) as input_file: for", "= min(spots, key = lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda", "= lambda s: s[0][0])[0][0] max_x = max(spots, key = lambda s: s[0][0])[0][0] min_y", "text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord in pos_str.replace('", "* spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second = 0 while True: tick(spots)", "max_y + 1): print(''.join(['#' if hasSpotAt(x, y, spots) else ' ' for x", "print(''.join(['#' if hasSpotAt(x, y, spots) else ' ' for x in range(min_x, max_x", "min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y = max(spots, key =", "max_x + 1)])) def tick(spots, dir=1): for spot in 
spots: spot[0] = (spot[0][0]", "current_min_delta_y = None current_second = 0 while True: tick(spots) current_second += 1 min_y", "= min(spots, key = lambda s: s[0][0])[0][0] max_x = max(spots, key = lambda", "= text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord in", "None current_second = 0 while True: tick(spots) current_second += 1 min_y = min(spots,", "velo] def hasSpotAt(x, y, spots): for spot in spots: if spot[0] == (x,y):", "lambda s: s[0][0])[0][0] max_x = max(spots, key = lambda s: s[0][0])[0][0] min_y =", "lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] for y", "max(spots, key = lambda s: s[0][0])[0][0] min_y = min(spots, key = lambda s:", "return True return False def printSpots(spots): min_x = min(spots, key = lambda s:", "tick(spots, dir=1): for spot in spots: spot[0] = (spot[0][0] + dir * spot[1][0],", "searchAndPrint(spots): current_min_delta_y = None current_second = 0 while True: tick(spots) current_second += 1", "[] with open(INPUT_FILE_NAME) as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part", "to part 1:') seconds = searchAndPrint(spots) print('Solution to part 2: %i' % (seconds,))", "(spot[0][0] + dir * spot[1][0], spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y", "s: s[0][0])[0][0] max_x = max(spots, key = lambda s: s[0][0])[0][0] min_y = min(spots,", "tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in", "tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo =", "pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return", "in spots: if spot[0] == (x,y): return True return False def printSpots(spots): min_x", "= max_y - min_y if current_min_delta_y == None or 
min_delta_y < current_min_delta_y: current_min_delta_y", "velo = tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo] def", "tmp_split = text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord", "y, spots): for spot in spots: if spot[0] == (x,y): return True return", "return current_second - 1 spots = [] with open(INPUT_FILE_NAME) as input_file: for line", "print('Solution to part 1:') seconds = searchAndPrint(spots) print('Solution to part 2: %i' %", "s: s[0][1])[0][1] min_delta_y = max_y - min_y if current_min_delta_y == None or min_delta_y", "velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x, y, spots): for spot in", "in range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x, y, spots) else ' '", "spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second = 0 while True: tick(spots) current_second", "== None or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots)", "= lambda s: s[0][1])[0][1] min_delta_y = max_y - min_y if current_min_delta_y == None", "tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x, y,", "current_min_delta_y == None or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1)", "as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds =", "= 0 while True: tick(spots) current_second += 1 min_y = min(spots, key =", "for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in velo_str.replace('", "current_second - 1 spots = [] with open(INPUT_FILE_NAME) as input_file: for line in", "1)])) def tick(spots, dir=1): for spot in spots: spot[0] = (spot[0][0] + dir", "= max(spots, key = lambda s: s[0][0])[0][0] min_y = min(spots, key = lambda", 
"< current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots) return current_second - 1", "key = lambda s: s[0][0])[0][0] min_y = min(spots, key = lambda s: s[0][1])[0][1]", "dir=1): for spot in spots: spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1]", "pos_str, velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord) for coord in pos_str.replace(' ',", "= tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x,", "- min_y if current_min_delta_y == None or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y", "== (x,y): return True return False def printSpots(spots): min_x = min(spots, key =", "current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots) return current_second - 1 spots =", "dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second = 0 while True:", "max_y - min_y if current_min_delta_y == None or min_delta_y < current_min_delta_y: current_min_delta_y =", "0 while True: tick(spots) current_second += 1 min_y = min(spots, key = lambda", "min_y if current_min_delta_y == None or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else:", "for y in range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x, y, spots) else", "= max(spots, key = lambda s: s[0][1])[0][1] min_delta_y = max_y - min_y if", "+= 1 min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y = max(spots,", "def printSpots(spots): min_x = min(spots, key = lambda s: s[0][0])[0][0] max_x = max(spots,", "s[0][1])[0][1] for y in range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x, y, spots)", "else ' ' for x in range(min_x, max_x + 1)])) def tick(spots, dir=1):", "else: tick(spots, -1) printSpots(spots) return current_second - 1 spots = [] with open(INPUT_FILE_NAME)", "(x,y): return True return False def printSpots(spots): min_x = min(spots, key = lambda", "s[0][0])[0][0] min_y = min(spots, key = lambda 
s: s[0][1])[0][1] max_y = max(spots, key", "[pos, velo] def hasSpotAt(x, y, spots): for spot in spots: if spot[0] ==", "def tick(spots, dir=1): for spot in spots: spot[0] = (spot[0][0] + dir *", "None or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots) return", "= lambda s: s[0][1])[0][1] for y in range(min_y, max_y + 1): print(''.join(['#' if", "for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x, y, spots):", "line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds = searchAndPrint(spots) print('Solution to", "open(INPUT_FILE_NAME) as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds", "dir * spot[1][0], spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None", "for spot in spots: if spot[0] == (x,y): return True return False def", "tick(spots, -1) printSpots(spots) return current_second - 1 spots = [] with open(INPUT_FILE_NAME) as", "printSpots(spots): min_x = min(spots, key = lambda s: s[0][0])[0][0] max_x = max(spots, key", "pos = tuple([int(coord) for coord in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for", "lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] min_delta_y =", "input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds = searchAndPrint(spots) print('Solution to part 2:", "in pos_str.replace(' ', '').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')])", "parseSpotInput(text): tmp_split = text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos = tuple([int(coord) for", "'input.txt' def parseSpotInput(text): tmp_split = text.split('>') pos_str, velo_str = tmp_split[0], tmp_split[1] pos =", "spots) else ' ' for x in range(min_x, max_x + 
1)])) def tick(spots,", "'').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x, y, spots): for spot in spots: if", "in spots: spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1] + dir *", "for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:') seconds = searchAndPrint(spots) print('Solution", "range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x, y, spots) else ' ' for", "1 min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y = max(spots, key", "= lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] for", "True: tick(spots) current_second += 1 min_y = min(spots, key = lambda s: s[0][1])[0][1]", "def searchAndPrint(spots): current_min_delta_y = None current_second = 0 while True: tick(spots) current_second +=", "spots: spot[0] = (spot[0][0] + dir * spot[1][0], spot[0][1] + dir * spot[1][1])", "key = lambda s: s[0][0])[0][0] max_x = max(spots, key = lambda s: s[0][0])[0][0]", "y in range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x, y, spots) else '", "', '').split('<')[1].split(',')]) return [pos, velo] def hasSpotAt(x, y, spots): for spot in spots:", "spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second = 0", "spot in spots: if spot[0] == (x,y): return True return False def printSpots(spots):", "<gh_stars>10-100 INPUT_FILE_NAME = 'input.txt' def parseSpotInput(text): tmp_split = text.split('>') pos_str, velo_str = tmp_split[0],", "key = lambda s: s[0][1])[0][1] min_delta_y = max_y - min_y if current_min_delta_y ==", "+ 1): print(''.join(['#' if hasSpotAt(x, y, spots) else ' ' for x in", "if current_min_delta_y == None or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots,", "key = lambda s: s[0][1])[0][1] for y in range(min_y, max_y + 1): print(''.join(['#'", "= None current_second = 0 while True: tick(spots) current_second += 1 min_y =", "x in range(min_x, max_x + 1)])) def tick(spots, 
dir=1): for spot in spots:", "min_delta_y else: tick(spots, -1) printSpots(spots) return current_second - 1 spots = [] with", "max_y = max(spots, key = lambda s: s[0][1])[0][1] min_delta_y = max_y - min_y", "spots: if spot[0] == (x,y): return True return False def printSpots(spots): min_x =", "s[0][1])[0][1] min_delta_y = max_y - min_y if current_min_delta_y == None or min_delta_y <", "if hasSpotAt(x, y, spots) else ' ' for x in range(min_x, max_x +", "with open(INPUT_FILE_NAME) as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution to part 1:')", "spots = [] with open(INPUT_FILE_NAME) as input_file: for line in input_file: spots.append(parseSpotInput(line.rstrip())) print('Solution", "spots): for spot in spots: if spot[0] == (x,y): return True return False", "key = lambda s: s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1]", "' for x in range(min_x, max_x + 1)])) def tick(spots, dir=1): for spot", "while True: tick(spots) current_second += 1 min_y = min(spots, key = lambda s:", "max(spots, key = lambda s: s[0][1])[0][1] min_delta_y = max_y - min_y if current_min_delta_y", "in range(min_x, max_x + 1)])) def tick(spots, dir=1): for spot in spots: spot[0]", "or min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots) return current_second", "max_y = max(spots, key = lambda s: s[0][1])[0][1] for y in range(min_y, max_y", "s: s[0][0])[0][0] min_y = min(spots, key = lambda s: s[0][1])[0][1] max_y = max(spots,", "lambda s: s[0][1])[0][1] for y in range(min_y, max_y + 1): print(''.join(['#' if hasSpotAt(x,", "min_delta_y = max_y - min_y if current_min_delta_y == None or min_delta_y < current_min_delta_y:", "-1) printSpots(spots) return current_second - 1 spots = [] with open(INPUT_FILE_NAME) as input_file:", "return False def printSpots(spots): min_x = min(spots, key = lambda s: s[0][0])[0][0] max_x", "in velo_str.replace(' ', '').split('<')[1].split(',')]) 
return [pos, velo] def hasSpotAt(x, y, spots): for spot", "= min_delta_y else: tick(spots, -1) printSpots(spots) return current_second - 1 spots = []", "+ 1)])) def tick(spots, dir=1): for spot in spots: spot[0] = (spot[0][0] +", "+ dir * spot[1][0], spot[0][1] + dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y =", "+ dir * spot[1][1]) def searchAndPrint(spots): current_min_delta_y = None current_second = 0 while", "for x in range(min_x, max_x + 1)])) def tick(spots, dir=1): for spot in", "s[0][1])[0][1] max_y = max(spots, key = lambda s: s[0][1])[0][1] min_delta_y = max_y -", "False def printSpots(spots): min_x = min(spots, key = lambda s: s[0][0])[0][0] max_x =", "'').split('<')[1].split(',')]) velo = tuple([int(velo) for velo in velo_str.replace(' ', '').split('<')[1].split(',')]) return [pos, velo]", "min_delta_y < current_min_delta_y: current_min_delta_y = min_delta_y else: tick(spots, -1) printSpots(spots) return current_second -" ]
[ "from rest_framework.response import Response from rest_framework.views import APIView, View from .models import Bug", ".serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request): bug = Bug.objects.all()", "import Http404 from django.contrib.auth import get_user from django.shortcuts import render from rest_framework import", "return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self,", "Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self,", "modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) #####################################################################################", "Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug, request.data)", "serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin):", "= get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else:", "class BugList(APIView): def get(self, request): bug = Bug.objects.all() serializer = BugReadSerializer(bug, many=True) return", "request): bug = Bug.objects.all() serializer = BugReadSerializer(bug, 
many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def", "= self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug", "serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return", "else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request): bugs = Bug.objects.all() context", "\"home.html\", context) class GetUserBugs(APIView): def get(self, request): user = User.objects.all() serializer = UserReadSerializer(user,", "class BugListPage(View): def get(self, request): bugs = Bug.objects.all() context = {\"bugs\": bugs} return", "except: raise Http404 def get(self, request, pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug)", "UserReadSerializer class BugList(APIView): def get(self, request): bug = Bug.objects.all() serializer = BugReadSerializer(bug, many=True)", "get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return", "Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request): bug", "serializer = BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors,", "View from .models import Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView):", "from .models import Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def", "pk): try: bug_object = Bug.objects.get(pk=pk) return 
bug_object except: raise Http404 def get(self, request,", "BugListPage(View): def get(self, request): bugs = Bug.objects.all() context = {\"bugs\": bugs} return render(request,", "return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug,", "Bug.objects.all() context = {\"bugs\": bugs} return render(request, \"home.html\", context) class GetUserBugs(APIView): def get(self,", "serializer = BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer =", "def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return bug_object except: raise Http404 def", "BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\"", "Http404 from django.contrib.auth import get_user from django.shortcuts import render from rest_framework import status,", "BugList(APIView): def get(self, request): bug = Bug.objects.all() serializer = BugReadSerializer(bug, many=True) return Response(serializer.data,", "many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def", "= BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "post(self, request): author = get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data = request.data)", "if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) 
##################################################################################### class", "serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug = self.get_object(pk)", "from django.http.response import Http404 from django.contrib.auth import get_user from django.shortcuts import render from", "return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def", "BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else:", "get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return", "self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug =", "mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return bug_object except: raise Http404", "modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk):", "def get(self, request): user = User.objects.all() serializer = UserReadSerializer(user, many=True) return Response(serializer.data, status=status.HTTP_200_OK)", "request.data) if 
serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN)", "django.contrib.auth import get_user from django.shortcuts import render from rest_framework import status, mixins from", "import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request): bug = Bug.objects.all() serializer", "= BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug = self.get_object(pk) serializer", "bug_object = Bug.objects.get(pk=pk) return bug_object except: raise Http404 def get(self, request, pk): bug", ".models import Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self,", "= self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid():", "status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author", "return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self,", "from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request): bug =", "BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request): bug = Bug.objects.all() serializer =", "from django.contrib.auth.models import User from django.http.response import Http404 from django.contrib.auth import get_user from", "bug = Bug.objects.all() serializer = 
BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self,", "def put(self, request, pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier =", "import Response from rest_framework.views import APIView, View from .models import Bug from .serializers", "BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request): bug = Bug.objects.all() serializer = BugReadSerializer(bug,", "pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated:", "get_user from django.shortcuts import render from rest_framework import status, mixins from rest_framework.response import", "get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return bug_object except: raise Http404 def get(self,", "\"\"\" def post(self, request): author = get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data", "Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object", "context) class GetUserBugs(APIView): def get(self, request): user = User.objects.all() serializer = UserReadSerializer(user, many=True)", "return render(request, \"home.html\", context) class GetUserBugs(APIView): def get(self, request): user = User.objects.all() serializer", "status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request): bugs = Bug.objects.all()", "bugs} return render(request, \"home.html\", context) class GetUserBugs(APIView): def get(self, request): user = User.objects.all()", "else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, 
mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk)", "get(self, request): bug = Bug.objects.all() serializer = BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\"", "django.shortcuts import render from rest_framework import status, mixins from rest_framework.response import Response from", "import get_user from django.shortcuts import render from rest_framework import status, mixins from rest_framework.response", "from django.conf import settings from django.contrib.auth.models import User from django.http.response import Http404 from", "{\"bugs\": bugs} return render(request, \"home.html\", context) class GetUserBugs(APIView): def get(self, request): user =", "rest_framework.response import Response from rest_framework.views import APIView, View from .models import Bug from", "mixins from rest_framework.response import Response from rest_framework.views import APIView, View from .models import", "if author.is_authenticated : serializer = BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data,", "raise Http404 def get(self, request, pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug) return", "class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return bug_object except:", "class GetUserBugs(APIView): def get(self, request): user = User.objects.all() serializer = UserReadSerializer(user, many=True) return", "serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView,", "serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 
else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View):", "Bug.objects.all() serializer = BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer", "<filename>register_api/views.py from django.conf import settings from django.contrib.auth.models import User from django.http.response import Http404", "from rest_framework.views import APIView, View from .models import Bug from .serializers import BugReadSerializer,", "Http404 def get(self, request, pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data,", "post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author = get_user(request) if", "request.data) modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors,", "bugs = Bug.objects.all() context = {\"bugs\": bugs} return render(request, \"home.html\", context) class GetUserBugs(APIView):", "request, pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self,", "request): author = get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data = request.data) if", "django.http.response import Http404 from django.contrib.auth import get_user from django.shortcuts import render from rest_framework", "GetUserBugs(APIView): def get(self, request): user = User.objects.all() serializer = UserReadSerializer(user, many=True) return Response(serializer.data,", "rest_framework import status, mixins from rest_framework.response import Response from rest_framework.views import APIView, View", "context = {\"bugs\": bugs} return render(request, \"home.html\", context) class 
GetUserBugs(APIView): def get(self, request):", "def post(self, request): author = get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data =", "get(self, request): bugs = Bug.objects.all() context = {\"bugs\": bugs} return render(request, \"home.html\", context)", "return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request): bugs", "if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class", "def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author = get_user(request)", "Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request): bugs = Bug.objects.all() context = {\"bugs\":", "BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk): bug = self.get_object(pk) serializer =", "try: bug_object = Bug.objects.get(pk=pk) return bug_object except: raise Http404 def get(self, request, pk):", "bug = self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request, pk):", "put(self, request, pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request)", "import status, mixins from rest_framework.response import Response from rest_framework.views import APIView, View from", "import render from rest_framework import status, mixins from rest_framework.response import Response from rest_framework.views", "= 
{\"bugs\": bugs} return render(request, \"home.html\", context) class GetUserBugs(APIView): def get(self, request): user", "Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, request):", "= Bug.objects.all() serializer = BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request):", "from django.contrib.auth import get_user from django.shortcuts import render from rest_framework import status, mixins", "request): bugs = Bug.objects.all() context = {\"bugs\": bugs} return render(request, \"home.html\", context) class", "BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return bug_object except: raise", "from django.shortcuts import render from rest_framework import status, mixins from rest_framework.response import Response", "get(self, request, pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def", "author = get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data = request.data) if serializer.is_valid():", "return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request): bugs = Bug.objects.all() context =", "\"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author =", "= BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data)", "BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return", "bug = 
self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated: if", "= Bug.objects.get(pk=pk) return bug_object except: raise Http404 def get(self, request, pk): bug =", "= BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK) \"\"\" def post(self, request): serializer = BugWriteSerializer(data=request.data)", "BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author = get_user(request) if author.is_authenticated : serializer =", "serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author = get_user(request) if author.is_authenticated :", "settings from django.contrib.auth.models import User from django.http.response import Http404 from django.contrib.auth import get_user", "pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK) def put(self, request,", "status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object =", "from rest_framework import status, mixins from rest_framework.response import Response from rest_framework.views import APIView,", "Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request): bugs =", "import Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class BugList(APIView): def get(self, request):", "author.is_authenticated : serializer = BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED)", 
"##################################################################################### class BugListPage(View): def get(self, request): bugs = Bug.objects.all() context = {\"bugs\": bugs}", "render from rest_framework import status, mixins from rest_framework.response import Response from rest_framework.views import", "= Bug.objects.all() context = {\"bugs\": bugs} return render(request, \"home.html\", context) class GetUserBugs(APIView): def", "Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) ##################################################################################### class BugListPage(View): def get(self, request):", "def get(self, request): bug = Bug.objects.all() serializer = BugReadSerializer(bug, many=True) return Response(serializer.data, status=status.HTTP_200_OK)", ": serializer = BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return", "return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try:", "Response from rest_framework.views import APIView, View from .models import Bug from .serializers import", "User from django.http.response import Http404 from django.contrib.auth import get_user from django.shortcuts import render", "django.contrib.auth.models import User from django.http.response import Http404 from django.contrib.auth import get_user from django.shortcuts", "if modifier.is_authenticated: if serializer.is_valid(): serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN)", "request): serializer = BugWriteSerializer(data=request.data) \"\"\" def post(self, 
request): author = get_user(request) if author.is_authenticated", "render(request, \"home.html\", context) class GetUserBugs(APIView): def get(self, request): user = User.objects.all() serializer =", "django.conf import settings from django.contrib.auth.models import User from django.http.response import Http404 from django.contrib.auth", "Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return bug_object", "rest_framework.views import APIView, View from .models import Bug from .serializers import BugReadSerializer, BugWriteSerializer,", "Bug.objects.get(pk=pk) return bug_object except: raise Http404 def get(self, request, pk): bug = self.get_object(pk)", "import settings from django.contrib.auth.models import User from django.http.response import Http404 from django.contrib.auth import", "self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request) if modifier.is_authenticated: if serializer.is_valid(): serializer.save()", "status, mixins from rest_framework.response import Response from rest_framework.views import APIView, View from .models", "import User from django.http.response import Http404 from django.contrib.auth import get_user from django.shortcuts import", "= request.data) if serializer.is_valid(): serializer.save(author=author) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return", "return bug_object except: raise Http404 def get(self, request, pk): bug = self.get_object(pk) serializer", "request, pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier = get_user(request) if", "serializer.save() return Response(serializer.data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) else: return Response(status=status.HTTP_403_FORBIDDEN) 
##################################################################################### class BugListPage(View): def", "return Response(status=status.HTTP_403_FORBIDDEN) class BugDetails(APIView, mixins.DestroyModelMixin): def get_object(self, pk): try: bug_object = Bug.objects.get(pk=pk) return", "= get_user(request) if author.is_authenticated : serializer = BugWriteSerializer(data = request.data) if serializer.is_valid(): serializer.save(author=author)", "APIView, View from .models import Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer class", "status=status.HTTP_200_OK) def put(self, request, pk): bug = self.get_object(pk) serializer = BugWriteSerializer(bug, request.data) modifier", "def get(self, request): bugs = Bug.objects.all() context = {\"bugs\": bugs} return render(request, \"home.html\",", "def get(self, request, pk): bug = self.get_object(pk) serializer = BugReadSerializer(bug) return Response(serializer.data, status=status.HTTP_200_OK)", "= BugWriteSerializer(data=request.data) \"\"\" def post(self, request): author = get_user(request) if author.is_authenticated : serializer", "import APIView, View from .models import Bug from .serializers import BugReadSerializer, BugWriteSerializer, UserReadSerializer", "bug_object except: raise Http404 def get(self, request, pk): bug = self.get_object(pk) serializer =" ]
[ "PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if", "KIND, either express or implied. # See the License for the specific language", "clean_environment from specification_parser import Specification from input_data_parser import InputData from generator_tables import generate_tables", "Unless required by applicable law or agreed to in writing, software # distributed", "and analytics layer. \"\"\" import sys import argparse import logging from pal_errors import", "output \" u\"can be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML", "downloading the data from Nexus and/or Jenkins. In \" u\"this case, the section", "from Nexus and/or Jenkins. In \" u\"this case, the section 'input' in the", "u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is not supported.\" ) return 1 ret_code", "Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output will be generated.\")", "generator_plots import generate_plots from generator_files import generate_files from static_content import prepare_static_content from generator_report", "u\"instead of downloading the data from Nexus and/or Jenkins. In \" u\"this case,", "data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec,", "file(s) which will be processed \" u\"instead of downloading the data from Nexus", "this file except in compliance with the License. # You may obtain a", "under the License. \"\"\"CSIT Presentation and analytics layer. \"\"\" import sys import argparse", "input_data_parser import InputData from generator_tables import generate_tables from generator_plots import generate_plots from generator_files", "\"\"\"Parse arguments from cmd line. 
:returns: Parsed arguments. :rtype: ArgumentParser \"\"\" parser =", "generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError as err: logging.critical(f\"Finished with an", "ANY KIND, either express or implied. # See the License for the specific", "u\"this case, the section 'input' in the specification file is \" u\"ignored.\" )", "XML file(s) generated by RobotFramework or with \" u\"sub-directories with XML file(s) which", "help=u\"Directory with XML file(s) generated by RobotFramework or with \" u\"sub-directories with XML", "def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\":", ") parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s) if present.\" )", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec", "the section 'input' in the specification file is \" u\"ignored.\" ) parser.add_argument( u\"-d\",", "args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else:", "output will be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError as err:", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= 0 except AlertingError as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError", "if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, 
data)", "ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'),", "type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of", "generate_tables from generator_plots import generate_plots from generator_files import generate_files from static_content import prepare_static_content", "OF ANY KIND, either express or implied. # See the License for the", "file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of the product.\" )", "else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"]", "the data from Nexus and/or Jenkins. 
In \" u\"this case, the section 'input'", "return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\":", "\" u\"this case, the section 'input' in the specification file is \" u\"ignored.\"", "default=u\"\", help=u\"XML file generated by RobotFramework which will be processed \" u\"instead of", "data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if", "from environment import Environment, clean_environment from specification_parser import Specification from input_data_parser import InputData", "\" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s) generated", "generator_report import generate_report from generator_cpta import generate_cpta from generator_alerts import Alerting, AlertingError def", "from input_data_parser import InputData from generator_tables import generate_tables from generator_plots import generate_plots from", "u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\",", "import Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from cmd line. :returns: Parsed arguments.", "data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data)", "if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\":", "all operational data to console. 
Be careful, the output \" u\"can be really", "elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts() except AlertingError", "action=u\"store_true\", help=u\"Print all operational data to console. Be careful, the output \" u\"can", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s) generated by RobotFramework or with \"", "with XML file(s) generated by RobotFramework or with \" u\"sub-directories with XML file(s)", "with error.\") return 1 if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The", "u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to console. Be careful, the output", "1 ret_code = 1 try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data =", "YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of the product.\"", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification()", "generate_files from static_content import prepare_static_content from generator_report import generate_report from generator_cpta import generate_cpta", "default=u\"master\", type=str, help=u\"Release string of the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str,", "parse_args(): \"\"\"Parse arguments from cmd line. :returns: Parsed arguments. 
:rtype: ArgumentParser \"\"\" parser", "the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "generator_alerts import Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from cmd line. :returns: Parsed", "the old build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational", "error.\\n{repr(err)}\") finally: if spec is not None: clean_environment(spec.environment) return ret_code if __name__ ==", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"\"\" import sys import argparse import logging from pal_errors import PresentationError from environment", "generate_cpta from generator_alerts import Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from cmd line.", ") parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of the product.\" ) parser.add_argument(", "the section 'input' in the specification file is \" u\"ignored.\" ) return parser.parse_args()", "console. Be careful, the output \" u\"can be really long.\" ) parser.add_argument( u\"-i\",", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file)", "analytics layer. 
\"\"\" import sys import argparse import logging from pal_errors import PresentationError", "%H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with", "required by applicable law or agreed to in writing, software # distributed under", "PresentationError as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err:", "return 1 ret_code = 1 try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data", "line. :returns: Parsed arguments. :rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter )", "applicable law or agreed to in writing, software # distributed under the License", "arguments from cmd line. :returns: Parsed arguments. :rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser(", "formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\",", "help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of the", "type=str, default=u\"\", help=u\"Directory with XML file(s) generated by RobotFramework or with \" u\"sub-directories", "data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif", "description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument(", "its affiliates. 
# Licensed under the Apache License, Version 2.0 (the \"License\"); #", "f\"The output {spec.output[u'output']} is not supported.\" ) return 1 ret_code = 1 try:", "the specific language governing permissions and # limitations under the License. \"\"\"CSIT Presentation", "or agreed to in writing, software # distributed under the License is distributed", "import argparse import logging from pal_errors import PresentationError from environment import Environment, clean_environment", "logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec is not None: clean_environment(spec.environment) return ret_code", "= argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\"", "data to console. Be careful, the output \" u\"can be really long.\" )", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force", "Specification from input_data_parser import InputData from generator_tables import generate_tables from generator_plots import generate_plots", "at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "PresentationError from environment import Environment, clean_environment from specification_parser import Specification from input_data_parser import", "is \" u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\":", "u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of the product.\" ) parser.add_argument( u\"-w\", u\"--week\",", "if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} 
is not", "spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts() except AlertingError as", "u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "\" u\"can be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file", "= Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory:", "alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError,", "err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec is not None: clean_environment(spec.environment) return", "u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s) generated by RobotFramework or with", "# You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0", "# Copyright (c) 2019 Cisco and/or its affiliates. 
# Licensed under the Apache", "u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\")", "import PresentationError from environment import Environment, clean_environment from specification_parser import Specification from input_data_parser", "from generator_report import generate_report from generator_cpta import generate_cpta from generator_alerts import Alerting, AlertingError", "finally: if spec is not None: clean_environment(spec.environment) return ret_code if __name__ == u\"__main__\":", "compliance with the License. # You may obtain a copy of the License", "'input' in the specification file is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str,", "specification file is \" u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels", "spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts()", "by RobotFramework or with \" u\"sub-directories with XML file(s) which will be processed", "environment import Environment, clean_environment from specification_parser import Specification from input_data_parser import InputData from", "spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"]", "parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO,", "u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by RobotFramework which will be processed \"", "logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", 
level=log_levels[args.logging]) logging.info(u\"Application started.\") try:", "layer. \"\"\" import sys import argparse import logging from pal_errors import PresentationError from", "import generate_report from generator_cpta import generate_cpta from generator_alerts import Alerting, AlertingError def parse_args():", "except PresentationError: logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"] not in (u\"none\", u\"report\",", "long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by RobotFramework which", "u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args()", "not use this file except in compliance with the License. # You may", "== u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert", "License, Version 2.0 (the \"License\"); # you may not use this file except", "parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML", "default=u\"1\", type=str, help=u\"Calendar week when the report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\",", "and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the \"License\");", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "and/or Jenkins. 
In \" u\"this case, the section 'input' in the specification file", "prepare_static_content from generator_report import generate_report from generator_cpta import generate_cpta from generator_alerts import Alerting,", "be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError as err: logging.critical(f\"Finished with", "alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully", "u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old", "# you may not use this file except in compliance with the License.", "agreed to in writing, software # distributed under the License is distributed on", "= 1 try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if", "import InputData from generator_tables import generate_tables from generator_plots import generate_plots from generator_files import", "in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is not supported.\" ) return", "except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\")", "static_content import prepare_static_content from generator_report import generate_report from generator_cpta import generate_cpta from generator_alerts", "(the \"License\"); # you may not use this file except in compliance with", "Cisco and/or its affiliates. 
# Licensed under the Apache License, Version 2.0 (the", "generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"]", "default=u\"\", help=u\"Directory with XML file(s) generated by RobotFramework or with \" u\"sub-directories with", "# Unless required by applicable law or agreed to in writing, software #", "\" u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET,", "the License. # You may obtain a copy of the License at: #", "u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by RobotFramework which will be processed", "by applicable law or agreed to in writing, software # distributed under the", "parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s) if present.\" ) parser.add_argument(", "file is \" u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels =", "u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d", "and # limitations under the License. \"\"\"CSIT Presentation and analytics layer. 
\"\"\" import", "logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\",", "u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err))", "not supported.\" ) return 1 ret_code = 1 try: env = Environment(spec.environment, args.force)", "parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by RobotFramework which will be", "choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\",", "file except in compliance with the License. # You may obtain a copy", "parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification)", "the specification file is \" u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main function.\"\"\"", "data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data:", "is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging", "from generator_alerts import Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from cmd line. 
:returns:", "u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the", "License for the specific language governing permissions and # limitations under the License.", "prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if", "which will be processed \" u\"instead of downloading the data from Nexus and/or", "case, the section 'input' in the specification file is \" u\"ignored.\" ) return", "as err: logging.warning(repr(err)) else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\") ret_code =", "\"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification", "1 try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file:", "= Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output will be", "will be processed \" u\"instead of downloading the data from Nexus and/or Jenkins.", "env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif", "to in writing, software # distributed under the License is distributed on an", "started.\") try: spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return 1", "implied. 
# See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "try: spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return 1 if", "from specification_parser import Specification from input_data_parser import InputData from generator_tables import generate_tables from", ") parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\",", "from static_content import prepare_static_content from generator_report import generate_report from generator_cpta import generate_cpta from", "build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to", "with a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\")", "the output \" u\"can be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\",", "help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s) if", "file generated by RobotFramework which will be processed \" u\"instead of downloading the", "type=str, help=u\"Calendar week when the report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\",", "u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s) if present.\" ) parser.add_argument( u\"-o\",", "2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0", "or implied. 
# See the License for the specific language governing permissions and", "product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the report is", "args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release,", "InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec,", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert =", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "finished.\") ret_code = 0 except AlertingError as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\")", "help=u\"Force removing the old build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print", "if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to console.", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] ==", "section 'input' in the specification file is \" u\"ignored.\" ) return parser.parse_args() def", "in writing, software # distributed under the License is distributed on an \"AS", "affiliates. 
# Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "file is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML", "limitations under the License. \"\"\"CSIT Presentation and analytics layer. \"\"\" import sys import", "string of the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when", "processed \" u\"instead of downloading the data from Nexus and/or Jenkins. In \"", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", ") parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to console. Be careful,", "is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s)", "really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by RobotFramework", "type=str, help=u\"Release string of the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar", "if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data))", "args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec", "specification_parser import Specification from input_data_parser import InputData from generator_tables import generate_tables from generator_plots", "License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "sys import argparse import logging from pal_errors import PresentationError from environment 
import Environment,", "with the License. # You may obtain a copy of the License at:", "spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try:", "type=str, default=u\"\", help=u\"XML file generated by RobotFramework which will be processed \" u\"instead", "the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the report", "Nexus and/or Jenkins. In \" u\"this case, the section 'input' in the specification", "InputData from generator_tables import generate_tables from generator_plots import generate_plots from generator_files import generate_files", "u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str,", ") parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by RobotFramework which will", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\"", "you may not use this file except in compliance with the License. #", "logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical(", "Copyright (c) 2019 Cisco and/or its affiliates. 
# Licensed under the Apache License,", "of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec)", "generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week)", "= Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"] not", "is not supported.\" ) return 1 ret_code = 1 try: env = Environment(spec.environment,", "to console. Be careful, the output \" u\"can be really long.\" ) parser.add_argument(", "may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # #", "parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\",", "In \" u\"this case, the section 'input' in the specification file is \"", "except (KeyError, ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec is", "generator_files import generate_files from static_content import prepare_static_content from generator_report import generate_report from generator_cpta", "a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "use this file except in compliance with the License. 
# You may obtain", "= InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data()", "generated by RobotFramework or with \" u\"sub-directories with XML file(s) which will be", "'input' in the specification file is \" u\"ignored.\" ) return parser.parse_args() def main():", "env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1)", "be processed \" u\"instead of downloading the data from Nexus and/or Jenkins. In", "required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" ) parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string", "permissions and # limitations under the License. \"\"\"CSIT Presentation and analytics layer. 
\"\"\"", "import logging from pal_errors import PresentationError from environment import Environment, clean_environment from specification_parser", "else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError", "AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\") ret_code", "copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", ") return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG,", ") parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s) generated by RobotFramework", "(c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version", "data from Nexus and/or Jenkins. 
In \" u\"this case, the section 'input' in", "removing the old build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all", "logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application", "from pal_errors import PresentationError from environment import Environment, clean_environment from specification_parser import Specification", "2.0 (the \"License\"); # you may not use this file except in compliance", "logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with a PAL", "help=u\"Release string of the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week", "level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s) if present.\"", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished with", "file(s) generated by RobotFramework or with \" u\"sub-directories with XML file(s) which will", "level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\")", "logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return", "governing permissions and # limitations under the License. 
\"\"\"CSIT Presentation and analytics layer.", "default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s)", "generated by RobotFramework which will be processed \" u\"instead of downloading the data", "# # Unless required by applicable law or agreed to in writing, software", "except PresentationError as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as", "{u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args", "express or implied. # See the License for the specific language governing permissions", "with XML file(s) which will be processed \" u\"instead of downloading the data", "1 if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is", "(KeyError, ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec is not", "week when the report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\",", "function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR,", "either express or implied. # See the License for the specific language governing", "Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from cmd line. :returns: Parsed arguments. :rtype:", "logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished with an", "present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to console. 
Be", "generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec, args.week) elif spec.output[u\"output\"] == u\"trending\":", "error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError, ValueError)", "except AlertingError as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError as err:", "import generate_cpta from generator_alerts import Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from cmd", "logging from pal_errors import PresentationError from environment import Environment, clean_environment from specification_parser import", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "\" u\"instead of downloading the data from Nexus and/or Jenkins. In \" u\"this", "import generate_files from static_content import prepare_static_content from generator_report import generate_report from generator_cpta import", "0 except AlertingError as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError as", "generator_tables import generate_tables from generator_plots import generate_plots from generator_files import generate_files from static_content", "data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] ==", "ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec is not None:", "Presentation and analytics layer. 
\"\"\" import sys import argparse import logging from pal_errors", "u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging])", "datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished", "if spec is not None: clean_environment(spec.environment) return ret_code if __name__ == u\"__main__\": sys.exit(main())", "in the specification file is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\",", "the specification file is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory", "with an error.\\n{repr(err)}\") finally: if spec is not None: clean_environment(spec.environment) return ret_code if", "def parse_args(): \"\"\"Parse arguments from cmd line. :returns: Parsed arguments. 
:rtype: ArgumentParser \"\"\"", "error.\") return 1 if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output", "as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally: if spec is not None: clean_environment(spec.environment)", "== u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts() except AlertingError as err:", "%(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification() except", "try: alert = Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output", "u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to console. Be careful, the output \"", "AlertingError def parse_args(): \"\"\"Parse arguments from cmd line. :returns: Parsed arguments. :rtype: ArgumentParser", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING,", "u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the report is published.\" ) parser.add_argument( u\"-l\",", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "with \" u\"sub-directories with XML file(s) which will be processed \" u\"instead of", "logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError as err: logging.critical(f\"Finished with an alerting", "pal_errors import PresentationError from environment import Environment, clean_environment from specification_parser import Specification from", "specific language governing permissions and # limitations under the License. 
\"\"\"CSIT Presentation and", "parser.add_argument( u\"-r\", u\"--release\", default=u\"master\", type=str, help=u\"Release string of the product.\" ) parser.add_argument( u\"-w\",", "import sys import argparse import logging from pal_errors import PresentationError from environment import", "{spec.output[u'output']} is not supported.\" ) return 1 ret_code = 1 try: env =", "cmd line. :returns: Parsed arguments. :rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter", "sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else:", "args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec,", "AlertingError as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished", "import Environment, clean_environment from specification_parser import Specification from input_data_parser import InputData from generator_tables", "action=u\"store_true\", help=u\"Force removing the old build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\",", "Specification(args.specification) spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"] not in", "u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the report is published.\" ) parser.add_argument(", ") parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the report is published.\"", "from generator_cpta import generate_cpta from generator_alerts import Alerting, 
AlertingError def parse_args(): \"\"\"Parse arguments", "= {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL}", "PresentationError: logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"):", "ret_code = 1 try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec)", "logging.critical( f\"The output {spec.output[u'output']} is not supported.\" ) return 1 ret_code = 1", "an alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except", "args.week) elif spec.output[u\"output\"] == u\"trending\": sys.stdout.write(generate_cpta(spec, data)) try: alert = Alerting(spec) alert.generate_alerts() except", "language governing permissions and # limitations under the License. \"\"\"CSIT Presentation and analytics", "License. # You may obtain a copy of the License at: # #", "You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 #", "Parsed arguments. :rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\",", "law or agreed to in writing, software # distributed under the License is", "parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the report is published.\" )", "from generator_tables import generate_tables from generator_plots import generate_plots from generator_files import generate_files from", "the License for the specific language governing permissions and # limitations under the", "operational data to console. Be careful, the output \" u\"can be really long.\"", "Jenkins. 
In \" u\"this case, the section 'input' in the specification file is", ":rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True,", "# limitations under the License. \"\"\"CSIT Presentation and analytics layer. \"\"\" import sys", "parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data to console. Be careful, the", "logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s:", "data)) try: alert = Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No", "Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec) data = InputData(spec) if args.input_file: data.process_local_file(args.input_file) elif args.input_directory: data.process_local_directory(args.input_directory)", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "elif args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec,", "the report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"],", "u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\":", "generate_report from generator_cpta import generate_cpta from generator_alerts import Alerting, AlertingError def parse_args(): \"\"\"Parse", "in the specification file is \" u\"ignored.\" ) return parser.parse_args() def main(): \"\"\"Main", 
"return 1 if spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']}", "spec.output[u\"output\"] not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is not supported.\"", "data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data) if spec.output[u\"output\"] == u\"report\": generate_report(args.release, spec,", "output {spec.output[u'output']} is not supported.\" ) return 1 ret_code = 1 try: env", "logging.warning(repr(err)) else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except", "u\"can be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated", "in compliance with the License. # You may obtain a copy of the", "of downloading the data from Nexus and/or Jenkins. In \" u\"this case, the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "an error.\\n{repr(err)}\") finally: if spec is not None: clean_environment(spec.environment) return ret_code if __name__", "from generator_plots import generate_plots from generator_files import generate_files from static_content import prepare_static_content from", "XML file(s) which will be processed \" u\"instead of downloading the data from", "not in (u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is not supported.\" )", "\" u\"sub-directories with XML file(s) which will be processed \" u\"instead of downloading", "import prepare_static_content from generator_report import generate_report from generator_cpta import generate_cpta from generator_alerts import", "u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s) generated by", "u\"--force\", action=u\"store_true\", help=u\"Force removing the old build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\",", "specification file is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with", "See the License for the specific language governing permissions and # limitations under", "as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "old build(s) if present.\" ) parser.add_argument( u\"-o\", u\"--print-all-oper-data\", action=u\"store_true\", help=u\"Print all operational data", "be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str, default=u\"\", help=u\"XML file generated by", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "for the specific language governing permissions and # limitations under the License. 
\"\"\"CSIT", "u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is not supported.\" ) return 1 ret_code =", "generate_plots from generator_files import generate_files from static_content import prepare_static_content from generator_report import generate_report", "logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError as", "import generate_plots from generator_files import generate_files from static_content import prepare_static_content from generator_report import", "obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "args.input_directory: data.process_local_directory(args.input_directory) else: data.download_and_parse_data(repeat=1) if args.print_all_oper_data: data.print_all_oper_data() generate_tables(spec, data) generate_plots(spec, data) generate_files(spec, data)", "argparse import logging from pal_errors import PresentationError from environment import Environment, clean_environment from", "when the report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\",", "with an alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with a PAL error.\\n{str(err)}\")", "u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\",", "report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\",", "supported.\" ) return 1 ret_code = 1 try: env = Environment(spec.environment, args.force) env.set_environment()", "careful, the output \" u\"can be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\", type=str,", "help=u\"XML file generated by RobotFramework which will be processed \" u\"instead of downloading", 
":returns: Parsed arguments. :rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument(", "err: logging.warning(repr(err)) else: logging.info(\"No output will be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0", "RobotFramework which will be processed \" u\"instead of downloading the data from Nexus", "%(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec = Specification(args.specification) spec.read_specification() except PresentationError:", "Version 2.0 (the \"License\"); # you may not use this file except in", "RobotFramework or with \" u\"sub-directories with XML file(s) which will be processed \"", "except in compliance with the License. # You may obtain a copy of", "section 'input' in the specification file is \" u\"ignored.\" ) parser.add_argument( u\"-d\", u\"--input-directory\",", "or with \" u\"sub-directories with XML file(s) which will be processed \" u\"instead", "spec.read_specification() except PresentationError: logging.critical(u\"Finished with error.\") return 1 if spec.output[u\"output\"] not in (u\"none\",", "logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args =", "a PAL error.\\n{str(err)}\") except (KeyError, ValueError) as err: logging.critical(f\"Finished with an error.\\n{repr(err)}\") finally:", "ret_code = 0 except AlertingError as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except", "parser.add_argument( u\"-d\", u\"--input-directory\", type=str, default=u\"\", help=u\"Directory with XML file(s) generated by RobotFramework or", "from cmd line. :returns: Parsed arguments. 
:rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__,", "logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\",", "generator_cpta import generate_cpta from generator_alerts import Alerting, AlertingError def parse_args(): \"\"\"Parse arguments from", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\": logging.CRITICAL} args = parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s:", "argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\", required=True, type=argparse.FileType(u'r'), help=u\"Specification YAML file.\" )", "by RobotFramework which will be processed \" u\"instead of downloading the data from", "Environment, clean_environment from specification_parser import Specification from input_data_parser import InputData from generator_tables import", ") return 1 ret_code = 1 try: env = Environment(spec.environment, args.force) env.set_environment() prepare_static_content(spec)", "u\"sub-directories with XML file(s) which will be processed \" u\"instead of downloading the", "u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument( u\"-f\", u\"--force\", action=u\"store_true\", help=u\"Force removing", "arguments. :rtype: ArgumentParser \"\"\" parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( u\"-s\", u\"--specification\",", "<gh_stars>0 # Copyright (c) 2019 Cisco and/or its affiliates. 
# Licensed under the", "alert = Alerting(spec) alert.generate_alerts() except AlertingError as err: logging.warning(repr(err)) else: logging.info(\"No output will", "as err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with", "import generate_tables from generator_plots import generate_plots from generator_files import generate_files from static_content import", "case, the section 'input' in the specification file is \" u\"ignored.\" ) parser.add_argument(", "the License. \"\"\"CSIT Presentation and analytics layer. \"\"\" import sys import argparse import", "u\"--release\", default=u\"master\", type=str, help=u\"Release string of the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\",", "log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\": logging.ERROR, u\"CRITICAL\":", "Be careful, the output \" u\"can be really long.\" ) parser.add_argument( u\"-i\", u\"--input-file\",", "help=u\"Print all operational data to console. Be careful, the output \" u\"can be", "\"\"\"CSIT Presentation and analytics layer. 
\"\"\" import sys import argparse import logging from", "from generator_files import generate_files from static_content import prepare_static_content from generator_report import generate_report from", "(u\"none\", u\"report\", u\"trending\"): logging.critical( f\"The output {spec.output[u'output']} is not supported.\" ) return 1", "= parse_args() logging.basicConfig(format=u\"%(asctime)s: %(levelname)s: %(message)s\", datefmt=u\"%Y/%m/%d %H:%M:%S\", level=log_levels[args.logging]) logging.info(u\"Application started.\") try: spec =", "import Specification from input_data_parser import InputData from generator_tables import generate_tables from generator_plots import", "err: logging.critical(f\"Finished with an alerting error.\\n{repr(err)}\") except PresentationError as err: logging.critical(f\"Finished with a", ") parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" )", "will be generated.\") logging.info(u\"Successfully finished.\") ret_code = 0 except AlertingError as err: logging.critical(f\"Finished", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "of the product.\" ) parser.add_argument( u\"-w\", u\"--week\", default=u\"1\", type=str, help=u\"Calendar week when the", "License. \"\"\"CSIT Presentation and analytics layer. \"\"\" import sys import argparse import logging", "parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\", u\"WARNING\", u\"ERROR\", u\"CRITICAL\"], default=u\"ERROR\", help=u\"Logging level.\" ) parser.add_argument(", "\"\"\"Main function.\"\"\" log_levels = {u\"NOTSET\": logging.NOTSET, u\"DEBUG\": logging.DEBUG, u\"INFO\": logging.INFO, u\"WARNING\": logging.WARNING, u\"ERROR\":", "help=u\"Calendar week when the report is published.\" ) parser.add_argument( u\"-l\", u\"--logging\", choices=[u\"DEBUG\", u\"INFO\"," ]
[ "n_classes): \"\"\"Calculates the logaritm of the probability of belonging to each class. Arguments:", "k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame} -- Independent training", "variables. \"\"\" try: X = X.values except AttributeError: pass try: X_train = X_train.values", "mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the probability of belonging to each class.", "= np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments:", "+ class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest", "-- Indendent variables. y {DataFrame} -- Dependent variable. Returns: int -- Most common", "more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test", "= train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred =", "\"\"\"Calculates the probability of feature j, given class k, using Laplace smoothing. Arguments:", "posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c] = posterior[:,", "-- Dependent variables. n_classes {int} -- Number of classes. Returns: ndarray -- Class", "y_pred def max_classifier_fit(X, y): \"\"\"Determines the most common class in input. Arguments: X", "test {DataTuple} -- Test data. All extra keyword arguments are passed to method.", "https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. Keyword", "train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k =", "-- Training data. test {DataTuple} -- Test data. 
k {int} -- Value for", "pass assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1)) for", "candidates, pick one randomly. Arguments: class_sums {list} -- Class frequencies. Returns: int --", "on the most common class in k-nearest neighbors. Arguments: X {DataFrame} -- Independent", "test {DataTuple} -- Test data. Returns: ndarray -- Predicted values. \"\"\" max_category =", "y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB", "max_k {int} -- Maximum value for k. (default: {MAX_K}) Returns: int -- Optimal", "{FOLDS}) max_k {int} -- Maximum value for k. (default: {MAX_K}) Returns: int --", "candidates = [x for x in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates)", "priors. ndarray -- Feature likelihoods. \"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X,", "of class labels. Returns: ndarray -- Log of prior probabilities. \"\"\" priors =", "RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs): \"\"\"Perform", "Classifies using the most common class in training data. Arguments: train {DataTuple} --", "extra keyword arguments are passed to method. Returns: float -- Error value returned", "k using cross validation. Arguments: train {DataTuple} -- Training data. Keyword Arguments: n_folds", "{int} -- Value for k. Returns: ndarray -- Predicted values. \"\"\" y_pred =", "except AttributeError: pass assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N,", "= folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix,", "AttributeError: pass assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1))", "using the most common class in training data. 
Arguments: train {DataTuple} -- Training", "posterior[:, c] = posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred", "= k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using", "max_category {int} -- Class to classify to. Returns: ndarray -- Predicted values. \"\"\"", "TODO: combine with k_nn_regression_fit()? X = train.X.values y = train.y.values N = X.shape[0]", "class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def", "divided by the number of points. Arguments: y_pred {ndarray} -- Predicted values. y_true", "If there are multiple candidates, pick one randomly. Arguments: class_sums {list} -- Class", "-- Independent variables. y {ndarray} -- Dependent variables. n_classes {int} -- Number of", "-- Number of classes. Returns: ndarray -- Class priors. ndarray -- Feature likelihoods.", "n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum =", "np.log((1.0 + Fnc) / (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods):", "y_train = y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1] N = X.shape[0]", "in range(n_classes): Fc_sum = np.sum(X[y == c, :]) for j in range(n_features): Fnc", "test {DataTuple} -- Test data. k {int} -- Value for k. Returns: ndarray", "Keyword Arguments: n_folds {int} -- Number of folds to use for validation. (default:", "min_error = mean_error best_k = k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train,", "most common class in k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables. X_train", "class label. 
\"\"\" order = np.argsort(class_sums)[::-1] candidates = [x for x in order", "for c in range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size) return priors", "return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the probability of", "cross validation. Arguments: train {DataTuple} -- Training data. Keyword Arguments: n_folds {int} --", "= X[i, :] neighbors, _ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums", "X.shape[0] y_pred = np.zeros((N, 1)) for i in range(N): point = X[i, :]", "dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier.", "range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c] =", "test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies using the most common class in", "Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray} -- True values. Returns: float", "posterior = np.zeros((N, n_classes)) for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :])", "k. \"\"\" # TODO: combine with k_nn_regression_fit()? X = train.X.values y = train.y.values", "folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :],", "\"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test,", "-- Predicted values. y_true {ndarray} -- True values. Returns: float -- Error. \"\"\"", "Arguments: X {ndarray} -- Independent variables. y {ndarray} -- Dependent variables. n_classes {int}", "{ndarray} -- Class labels. n_classes {int} -- Number of class labels. Returns: ndarray", "Test data. Returns: ndarray -- Predicted values. 
\"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred", "n_folds {int} -- Number of folds to use for validation. (default: {FOLDS}) max_k", "common class in training data. Arguments: train {DataTuple} -- Training data. test {DataTuple}", "c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k):", "of folds to use for validation. (default: {FOLDS}) max_k {int} -- Maximum value", "(default: {N_CLASSES}) Returns: ndarray -- Predicted values. \"\"\" train_X = train.X.values train_y =", "-- Dependent training variables. k {int} -- Value of k. Keyword Arguments: n_classes", "MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs):", "error_func, train, test, **kwargs): \"\"\"Perform classification for data and return error. Arguments: method", "most common class. If there are multiple candidates, pick one randomly. Arguments: class_sums", "int -- Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1] candidates = [x for", "def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the probability of belonging to each", "to method. Returns: float -- Error value returned by error_func. \"\"\" y_pred =", "p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y == c,", "y {ndarray} -- Class labels. n_classes {int} -- Number of class labels. Returns:", "return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors", "{DataFrame} -- Dependent training variables. k {int} -- Value of k. Keyword Arguments:", "Returns: ndarray -- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return", "n_classes {int} -- Number of class labels. Returns: ndarray -- Log of prior", "class. 
\"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category):", "most common class in training data. Arguments: train {DataTuple} -- Training data. test", "selected k. \"\"\" # TODO: combine with k_nn_regression_fit()? X = train.X.values y =", "value for k. (default: {MAX_K}) Returns: int -- Optimal value for k. float", "value for k. float -- Error for selected k. \"\"\" # TODO: combine", "Returns: ndarray -- Predicted values. \"\"\" train_X = train.X.values train_y = train.y.values test_X", "use for validation. (default: {FOLDS}) max_k {int} -- Maximum value for k. (default:", "Error for selected k. \"\"\" # TODO: combine with k_nn_regression_fit()? X = train.X.values", "k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns class labels based on the", "{list} -- Class frequencies. Returns: int -- Assinged class label. \"\"\" order =", "with k_nn_regression_fit()? X = train.X.values y = train.y.values N = X.shape[0] folds =", "== c, :]) for j in range(n_features): Fnc = np.sum(X[y == c, j])", "-- Number of classes. Returns: ndarray -- Logs of feature likelihoods. \"\"\" n_features", "import numpy as np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities", "= train.X.values y = train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error", "data. k {int} -- Value for k. Returns: ndarray -- Predicted values. \"\"\"", "incorrectly assinged classes divided by the number of points. Arguments: y_pred {ndarray} --", "1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes", "validation. Arguments: train {DataTuple} -- Training data. 
Keyword Arguments: n_folds {int} -- Number", "k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the most common class. If", "logaritm of the probability of belonging to each class. Arguments: y {ndarray} --", "-- Independent variables. X_train {DataFrame} -- Independent training variables. y_train {DataFrame} -- Dependent", "for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label", "Features. y {ndarray} -- Class labels. n_classes {int} -- Number of classes. Returns:", "in training data. Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test", "except AttributeError: pass try: y_train = y_train.values except AttributeError: pass assert X.shape[1] ==", "k. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray", "Arguments: X {DataFrame} -- Indendent variables. y {DataFrame} -- Dependent variable. Returns: int", "-- Maximum value for k. (default: {MAX_K}) Returns: int -- Optimal value for", "def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding optimal value for", "k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1 for k in range(1, max_k):", "Training data. test {DataTuple} -- Test data. Returns: ndarray -- Predicted values. \"\"\"", "test, **kwargs): \"\"\"Perform classification for data and return error. Arguments: method {function} --", "Arguments: X {ndarray} -- Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray}", "-- Dependent variable. Returns: int -- Most common class. \"\"\" y = y.values", "-- True values. Returns: float -- Error. \"\"\" y_true = y_true.reshape(y_pred.shape) return np.sum(y_pred.astype(np.int)", "All extra keyword arguments are passed to method. 
Returns: float -- Error value", "in range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0 +", "Indendent variables. y {DataFrame} -- Dependent variable. Returns: int -- Most common class.", "error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error = np.sum(errors) /", "Independent variables. y {ndarray} -- Dependent variables. n_classes {int} -- Number of classes.", "Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray --", "\"\"\"Calculates the logaritm of the probability of belonging to each class. Arguments: y", "for c in range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c] y_pred =", "feature likelihoods. \"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in", "c in range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size) return priors def", "combine with k_nn_regression_fit()? X = train.X.values y = train.y.values N = X.shape[0] folds", "ndarray -- Predicted variables. \"\"\" try: X = X.values except AttributeError: pass try:", "-- Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1] candidates = [x for x", "variables. max_category {int} -- Class to classify to. Returns: ndarray -- Predicted values.", "of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\" try: X =", "range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return", "training variables. k {int} -- Value of k. Keyword Arguments: n_classes {int} --", "classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification for data and return error. Arguments:", "based on the most common class in k-nearest neighbors. Arguments: X {DataFrame} --", "Training data. test {DataTuple} -- Test data. k {int} -- Value for k.", "-- Predicted values. 
\"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return", "Fc_sum = np.sum(X[y == c, :]) for j in range(n_features): Fnc = np.sum(X[y", "finding optimal value for k using cross validation. Arguments: train {DataTuple} -- Training", "for selected k. \"\"\" # TODO: combine with k_nn_regression_fit()? X = train.X.values y", "-- Class frequencies. Returns: int -- Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1]", "machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification for", "return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See more at:", "training data. Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data.", "N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train,", "using K-nearest neighbors classifier. Assigns class labels based on the most common class", "Number of classes. Returns: ndarray -- Logs of feature likelihoods. \"\"\" n_features =", "ndarray -- Logs of feature likelihoods. \"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes,", "return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple} --", "in range(N): point = X[i, :] neighbors, _ = get_k_nn(point, X_train, k) train_labels", "variables. y {DataFrame} -- Dependent variable. Returns: int -- Most common class. \"\"\"", "n_classes = class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes)) for i in", "-- Training data. test {DataTuple} -- Test data. Returns: ndarray -- Predicted values.", "from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification", "k. 
Returns: ndarray -- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k)", "(valid_ix.size * error) mean_error = np.sum(errors) / N if mean_error < min_error: min_error", "= y_train[neighbors] class_sums = [np.sum(train_labels == i) for i in range(n_classes)] y_pred[i] =", "{N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\" try: X = X.values except AttributeError:", "train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error =", "== c) / y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability", "\"\"\"Classification methods.\"\"\" import numpy as np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED", "int -- Most common class. \"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax() return", "c in range(n_classes): Fc_sum = np.sum(X[y == c, :]) for j in range(n_features):", "{DataTuple} -- Test data. k {int} -- Value for k. Returns: ndarray --", "validation. (default: {FOLDS}) max_k {int} -- Maximum value for k. (default: {MAX_K}) Returns:", "get_k_nn def classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification for data and return", "-- Log of prior probabilities. \"\"\" priors = np.zeros(n_classes) for c in range(n_classes):", "feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X {ndarray} -- Independent variables. class_priors {ndarray}", "for k using cross validation. Arguments: train {DataTuple} -- Training data. 
Keyword Arguments:", "y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred =", "/ N if mean_error < min_error: min_error = mean_error best_k = k return", "np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn", "feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def", "y): \"\"\"Determines the most common class in input. Arguments: X {DataFrame} -- Indendent", "max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See more", "{ndarray} -- Predicted values. y_true {ndarray} -- True values. Returns: float -- Error.", "in input. Arguments: X {DataFrame} -- Indendent variables. y {DataFrame} -- Dependent variable.", "values. Returns: float -- Error. \"\"\" y_true = y_true.reshape(y_pred.shape) return np.sum(y_pred.astype(np.int) != y_true.astype(np.int))", "FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test,", "test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return", "k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest", "np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments: train", "classification error. 
Sum of incorrectly assinged classes divided by the number of points.", "np.sum(errors) / N if mean_error < min_error: min_error = mean_error best_k = k", "i in range(N): point = X[i, :] neighbors, _ = get_k_nn(point, X_train, k)", "except AttributeError: pass try: X_train = X_train.values except AttributeError: pass try: y_train =", "= mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X,", "classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values. \"\"\" train_X = train.X.values train_y", "MNB classifier. Arguments: X {ndarray} -- Independent variables. class_priors {ndarray} -- Class priors.", "k_nn_regression_fit()? X = train.X.values y = train.y.values N = X.shape[0] folds = k_fold_split_indexes(N,", "naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data.", "-- Training data. Keyword Arguments: n_folds {int} -- Number of folds to use", "classifier. Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. k", "-- Error for selected k. \"\"\" # TODO: combine with k_nn_regression_fit()? X =", "k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple} -- Training data. test {DataTuple} --", "np.sum(X[y == c, :]) for j in range(n_features): Fnc = np.sum(X[y == c,", "N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1", "\"\"\" # TODO: combine with k_nn_regression_fit()? X = train.X.values y = train.y.values N", "the most common class in input. Arguments: X {DataFrame} -- Indendent variables. y", "probability of feature j, given class k, using Laplace smoothing. Arguments: X {ndarray}", "N = X.shape[0] posterior = np.zeros((N, n_classes)) for i in range(N): posterior[i, :]", "Returns: ndarray -- Log of prior probabilities. 
\"\"\" priors = np.zeros(n_classes) for c", "= np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y == c, :])", "y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i]", "-- Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods.", "valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix],", "-- Independent training variables. y_train {DataFrame} -- Dependent training variables. k {int} --", "pass try: y_train = y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1] N", "\"\"\"Classify using max classifier. Arguments: X {DataFrame} -- Independent variables. max_category {int} --", ":] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c] = posterior[:, c]", "k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size", "y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier.", "float -- Error for selected k. \"\"\" # TODO: combine with k_nn_regression_fit()? X", "-- Training data. test {DataTuple} -- Test data. Keyword Arguments: n_classes {int} --", "class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes)) for i in range(N): posterior[i,", "Returns: ndarray -- Predicted values. \"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X,", "= np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. Arguments: X", "variables. y {ndarray} -- Dependent variables. n_classes {int} -- Number of classes. 
Returns:", "\"\"\"Assing label according the most common class. If there are multiple candidates, pick", "np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix =", "np.zeros((N, n_classes)) for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c", "p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum)) return p_ij def", "values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS,", ":]) for j in range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c, j]", "class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred", "{DataTuple} -- Training data. Keyword Arguments: n_folds {int} -- Number of folds to", "mean_error = np.sum(errors) / N if mean_error < min_error: min_error = mean_error best_k", "True values. Returns: float -- Error. \"\"\" y_true = y_true.reshape(y_pred.shape) return np.sum(y_pred.astype(np.int) !=", "= class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes)) for i in range(N):", "test, k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple} -- Training data. test {DataTuple}", "input. Arguments: X {DataFrame} -- Indendent variables. y {DataFrame} -- Dependent variable. Returns:", "{int} -- Maximum value for k. (default: {MAX_K}) Returns: int -- Optimal value", "np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum of incorrectly assinged classes divided", "-- Feature likelihoods. Returns: ndarray -- Predicted values. \"\"\" n_classes = class_priors.size N", "the probability of belonging to each class. Arguments: y {ndarray} -- Class labels.", "labels. Returns: ndarray -- Log of prior probabilities. 
\"\"\" priors = np.zeros(n_classes) for", "return y_pred def max_classifier_fit(X, y): \"\"\"Determines the most common class in input. Arguments:", "np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0 + Fnc) / (n_features +", "train.X.values y = train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error =", "error) mean_error = np.sum(errors) / N if mean_error < min_error: min_error = mean_error", "if mean_error < min_error: min_error = mean_error best_k = k return int(best_k), min_error", "X.shape[0] posterior = np.zeros((N, n_classes)) for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i,", "\"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods", "priors and feature likelihoods. Arguments: X {ndarray} -- Independent variables. y {ndarray} --", "j]) p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum)) return p_ij", "mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes):", "\"\"\"Classify using MNB classifier. Arguments: X {ndarray} -- Independent variables. class_priors {ndarray} --", "min_error: min_error = mean_error best_k = k return int(best_k), min_error def k_nn_classifier_predict(X, X_train,", "y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns class labels based on", "priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted values. \"\"\" n_classes", "Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted", "the most common class in training data. Arguments: train {DataTuple} -- Training data.", "y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the", "Independent variables. 
X_train {DataFrame} -- Independent training variables. y_train {DataFrame} -- Dependent training", "= test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods)", "i in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred", ":], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size *", "import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method, error_func,", "= np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size)", "= posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train,", "y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error = np.sum(errors) / N if mean_error", "{DataFrame} -- Indendent variables. y {DataFrame} -- Dependent variable. Returns: int -- Most", "**kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies using the most", "j, given class k, using Laplace smoothing. Arguments: X {ndarray} -- Features. y", "neighbors classifier by finding optimal value for k using cross validation. Arguments: train", "return error. Arguments: method {function} -- Classification method. error_func {function} -- Error function.", "feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the", "mean_error best_k = k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES):", "\"\"\"Classify using K-nearest neighbors classifier. Assigns class labels based on the most common", "probabilities. 
\"\"\" priors = np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y ==", "using Laplace smoothing. Arguments: X {ndarray} -- Features. y {ndarray} -- Class labels.", "Class labels. n_classes {int} -- Number of class labels. Returns: ndarray -- Log", "k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification for data and", "X_train {DataFrame} -- Independent training variables. y_train {DataFrame} -- Dependent training variables. k", "X_train = X_train.values except AttributeError: pass try: y_train = y_train.values except AttributeError: pass", "{ndarray} -- True values. Returns: float -- Error. \"\"\" y_true = y_true.reshape(y_pred.shape) return", "K-nearest neighbors classifier. Assigns class labels based on the most common class in", "range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the most", "ndarray -- Predicted values. \"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category)", "priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of feature j, given class", "machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def classification(method,", "classifier. Calculates class priors and feature likelihoods. Arguments: X {ndarray} -- Independent variables.", "Assigns class labels based on the most common class in k-nearest neighbors. Arguments:", "Training data. Keyword Arguments: n_folds {int} -- Number of folds to use for", "Returns: ndarray -- Predicted variables. \"\"\" try: X = X.values except AttributeError: pass", "common class. \"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X,", "{ndarray} -- Features. y {ndarray} -- Class labels. 
n_classes {int} -- Number of", "Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted values. \"\"\"", "the probability of feature j, given class k, using Laplace smoothing. Arguments: X", "= mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y,", "n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y == c, :]) for j", "values. \"\"\" train_X = train.X.values train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods", "X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns class labels based", "priors[c] = np.log(np.sum(y == c) / y.size) return priors def mnb_feature_likelihoods(X, y, n_classes):", "Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1] candidates = [x for x in", "\"\"\" n_classes = class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes)) for i", "common class in input. Arguments: X {DataFrame} -- Indendent variables. y {DataFrame} --", "{DataFrame} -- Dependent variable. Returns: int -- Most common class. \"\"\" y =", "{function} -- Classification method. error_func {function} -- Error function. train {DataTuple} -- Training", "{N_CLASSES}) Returns: ndarray -- Predicted values. \"\"\" train_X = train.X.values train_y = train.y.values", "x in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return", "= y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred", "\"\"\"Perform classification for data and return error. Arguments: method {function} -- Classification method.", "-- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted values.", "-- Feature likelihoods. 
\"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes)", "Dependent training variables. k {int} -- Value of k. Keyword Arguments: n_classes {int}", "Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\" try: X", "y_train {DataFrame} -- Dependent training variables. k {int} -- Value of k. Keyword", "y_pred {ndarray} -- Predicted values. y_true {ndarray} -- True values. Returns: float --", "ndarray -- Predicted values. \"\"\" n_classes = class_priors.size N = X.shape[0] posterior =", "Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns:", "\"\"\" try: X = X.values except AttributeError: pass try: X_train = X_train.values except", "\"\"\"Maximum classifier. Classifies using the most common class in training data. Arguments: train", "k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors", "X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1 for k", "i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing", "n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} --", "for x in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true):", "-- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\" try:", "class in input. Arguments: X {DataFrame} -- Indendent variables. y {DataFrame} -- Dependent", "range(n_classes): Fc_sum = np.sum(X[y == c, :]) for j in range(n_features): Fnc =", "using cross validation. Arguments: train {DataTuple} -- Training data. 
Keyword Arguments: n_folds {int}", "class labels. Returns: ndarray -- Log of prior probabilities. \"\"\" priors = np.zeros(n_classes)", "y, n_classes): \"\"\"Calculates the probability of feature j, given class k, using Laplace", "labels based on the most common class in k-nearest neighbors. Arguments: X {DataFrame}", "data. All extra keyword arguments are passed to method. Returns: float -- Error", "def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. Arguments: X {DataFrame} -- Independent variables.", "assert X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1)) for i", "Maximum value for k. (default: {MAX_K}) Returns: int -- Optimal value for k.", "for i in range(N): point = X[i, :] neighbors, _ = get_k_nn(point, X_train,", "Predicted values. \"\"\" train_X = train.X.values train_y = train.y.values test_X = test.X.values class_priors,", "def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple} -- Training data.", "{int} -- Class to classify to. Returns: ndarray -- Predicted values. \"\"\" y_pred", "classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\" try: X = X.values", "variable. Returns: int -- Most common class. \"\"\" y = y.values max_category =", "value returned by error_func. \"\"\" y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values)", "y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier.", "X {DataFrame} -- Independent variables. max_category {int} -- Class to classify to. Returns:", "= mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier.", "return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X {ndarray}", "ndarray -- Feature likelihoods. 
\"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y,", "using MNB classifier. Arguments: X {ndarray} -- Independent variables. class_priors {ndarray} -- Class", "def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of feature j, given class k,", "mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X {ndarray} -- Independent variables.", "c] = posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def", "values. \"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train,", "= np.log(np.sum(y == c) / y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates", "X = train.X.values y = train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds)", "np.zeros((N, 1)) for i in range(N): point = X[i, :] neighbors, _ =", "-- Test data. All extra keyword arguments are passed to method. Returns: float", "-- Predicted values. \"\"\" train_X = train.X.values train_y = train.y.values test_X = test.X.values", "train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines the most", "min_error = np.infty best_k = 1 for k in range(1, max_k): errors =", "from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes, get_k_nn def", "j] = np.log((1.0 + Fnc) / (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X,", "(default: {MAX_K}) Returns: int -- Optimal value for k. float -- Error for", "# TODO: combine with k_nn_regression_fit()? X = train.X.values y = train.y.values N =", "{int} -- Value of k. Keyword Arguments: n_classes {int} -- Number of classes.", "\"\"\"Return classification error. 
Sum of incorrectly assinged classes divided by the number of", "Arguments: X {ndarray} -- Features. y {ndarray} -- Class labels. n_classes {int} --", "values. \"\"\" n_classes = class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes)) for", "MNB classifier. Calculates class priors and feature likelihoods. Arguments: X {ndarray} -- Independent", "= np.sum(X[y == c, :]) for j in range(n_features): Fnc = np.sum(X[y ==", "Predicted variables. \"\"\" try: X = X.values except AttributeError: pass try: X_train =", "test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors,", "n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns class labels based on the most", "= max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines the most common class", "of feature j, given class k, using Laplace smoothing. Arguments: X {ndarray} --", "y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates class priors and feature", "y_true {ndarray} -- True values. Returns: float -- Error. \"\"\" y_true = y_true.reshape(y_pred.shape)", "y {ndarray} -- Dependent variables. n_classes {int} -- Number of classes. Returns: ndarray", "= np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix", "tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :],", "{int} -- Number of class labels. Returns: ndarray -- Log of prior probabilities.", "Returns: ndarray -- Predicted values. \"\"\" n_classes = class_priors.size N = X.shape[0] posterior", "X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1)) for i in range(N): point", "= 1 for k in range(1, max_k): errors = np.zeros(n_folds) for i in", "Independent variables. max_category {int} -- Class to classify to. 
Returns: ndarray -- Predicted", "get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i) for i", "mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of feature j, given class k, using", ":], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error", "error_func {function} -- Error function. train {DataTuple} -- Training data. test {DataTuple} --", "Dependent variables. n_classes {int} -- Number of classes. Returns: ndarray -- Class priors.", "Returns: float -- Error. \"\"\" y_true = y_true.reshape(y_pred.shape) return np.sum(y_pred.astype(np.int) != y_true.astype(np.int)) /", "-- Number of class labels. Returns: ndarray -- Log of prior probabilities. \"\"\"", "range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix,", "classification for data and return error. Arguments: method {function} -- Classification method. error_func", "y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier.", "X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error)", "def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X {ndarray} -- Independent", "feature likelihoods. Arguments: X {ndarray} -- Independent variables. y {ndarray} -- Dependent variables.", "{MAX_K}) Returns: int -- Optimal value for k. float -- Error for selected", "each class. Arguments: y {ndarray} -- Class labels. n_classes {int} -- Number of", "n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates", "neighbors classifier. Arguments: train {DataTuple} -- Training data. 
test {DataTuple} -- Test data.", "-- Most common class. \"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category", "{ndarray} -- Independent variables. y {ndarray} -- Dependent variables. n_classes {int} -- Number", "y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES):", "def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns class", "Most common class. \"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def", "train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X,", "X {DataFrame} -- Independent variables. X_train {DataFrame} -- Independent training variables. y_train {DataFrame}", "classes divided by the number of points. Arguments: y_pred {ndarray} -- Predicted values.", "= k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest", "error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies using the most common class", "Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X", "{DataTuple} -- Training data. test {DataTuple} -- Test data. k {int} -- Value", "/ y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of feature", "by finding optimal value for k using cross validation. Arguments: train {DataTuple} --", "pick one randomly. Arguments: class_sums {list} -- Class frequencies. Returns: int -- Assinged", "train {DataTuple} -- Training data. 
Keyword Arguments: n_folds {int} -- Number of folds", "y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of feature j,", "multiple candidates, pick one randomly. Arguments: class_sums {list} -- Class frequencies. Returns: int", "Calculates class priors and feature likelihoods. Arguments: X {ndarray} -- Independent variables. y", "and feature likelihoods. Arguments: X {ndarray} -- Independent variables. y {ndarray} -- Dependent", "Test data. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns:", "train {DataTuple} -- Training data. test {DataTuple} -- Test data. k {int} --", "train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes):", "{int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\"", "class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors", "class k, using Laplace smoothing. Arguments: X {ndarray} -- Features. y {ndarray} --", "n_classes {int} -- Number of classes. Returns: ndarray -- Logs of feature likelihoods.", "are passed to method. Returns: float -- Error value returned by error_func. \"\"\"", "feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted values. \"\"\" n_classes =", "X {ndarray} -- Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} --", "== order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum of incorrectly", "y_train[neighbors] class_sums = [np.sum(train_labels == i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums)", "of feature likelihoods. \"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c", "\"\"\"K-nearest neighbors classifier. 
Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test", "test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies using the", "{ndarray} -- Dependent variables. n_classes {int} -- Number of classes. Returns: ndarray --", "= np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0 + Fnc) / (n_features", "classes. Returns: ndarray -- Class priors. ndarray -- Feature likelihoods. \"\"\" class_priors =", "errors[i] = (valid_ix.size * error) mean_error = np.sum(errors) / N if mean_error <", "-- Value of k. Keyword Arguments: n_classes {int} -- Number of classes. (default:", "= feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c] = posterior[:, c] +", "train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by", "k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns class labels", "neighbors. Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame} -- Independent training variables.", "train_X = train.X.values train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X,", "-- Training data. test {DataTuple} -- Test data. All extra keyword arguments are", "y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes", "{ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted values. \"\"\" n_classes = class_priors.size", "feature j, given class k, using Laplace smoothing. Arguments: X {ndarray} -- Features.", "for validation. (default: {FOLDS}) max_k {int} -- Maximum value for k. (default: {MAX_K})", "return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. 
Arguments: X {DataFrame} --", "error. Arguments: method {function} -- Classification method. error_func {function} -- Error function. train", "data. Returns: ndarray -- Predicted values. \"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred =", "N = X.shape[0] y_pred = np.zeros((N, 1)) for i in range(N): point =", "train {DataTuple} -- Training data. test {DataTuple} -- Test data. Returns: ndarray --", "-- Class labels. n_classes {int} -- Number of classes. Returns: ndarray -- Logs", "{DataTuple} -- Test data. Returns: ndarray -- Predicted values. \"\"\" max_category = max_classifier_fit(train.X,", "range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0 + Fnc)", "training variables. y_train {DataFrame} -- Dependent training variables. k {int} -- Value of", "def k_nn_assign_label(class_sums): \"\"\"Assing label according the most common class. If there are multiple", "class in k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame} --", "X {ndarray} -- Independent variables. y {ndarray} -- Dependent variables. n_classes {int} --", "Dependent variable. Returns: int -- Most common class. \"\"\" y = y.values max_category", "label according the most common class. If there are multiple candidates, pick one", "data and return error. Arguments: method {function} -- Classification method. error_func {function} --", "[x for x in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred,", "points. Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray} -- True values. Returns:", "def max_classifier_fit(X, y): \"\"\"Determines the most common class in input. Arguments: X {DataFrame}", "assinged classes divided by the number of points. 
Arguments: y_pred {ndarray} -- Predicted", "_ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i)", "mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates", "max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. Arguments:", "= k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1 for k in range(1,", "one randomly. Arguments: class_sums {list} -- Class frequencies. Returns: int -- Assinged class", "try: X = X.values except AttributeError: pass try: X_train = X_train.values except AttributeError:", "Number of folds to use for validation. (default: {FOLDS}) max_k {int} -- Maximum", "= classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error = np.sum(errors) / N", "{int} -- Number of folds to use for validation. (default: {FOLDS}) max_k {int}", "n_classes): \"\"\"Fit MNB classifier. Calculates class priors and feature likelihoods. Arguments: X {ndarray}", "by the number of points. Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray}", "train {DataTuple} -- Training data. test {DataTuple} -- Test data. All extra keyword", "of incorrectly assinged classes divided by the number of points. Arguments: y_pred {ndarray}", "\"\"\"Multinomial naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training", "train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier", "k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i) for i in range(n_classes)]", "return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the most common class. 
If there", "np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix])", "return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding optimal", "Error value returned by error_func. \"\"\" y_pred = method(train, test, **kwargs) return error_func(y_pred,", "{DataTuple} -- Training data. test {DataTuple} -- Test data. Keyword Arguments: n_classes {int}", "class. Arguments: y {ndarray} -- Class labels. n_classes {int} -- Number of class", "{int} -- Number of classes. Returns: ndarray -- Logs of feature likelihoods. \"\"\"", "-- Features. y {ndarray} -- Class labels. n_classes {int} -- Number of classes.", "-- Class priors. ndarray -- Feature likelihoods. \"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods", "-- Value for k. Returns: ndarray -- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X,", "ndarray -- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred", "Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train,", "in range(1, max_k): errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:]", "Returns: int -- Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1] candidates = [x", "the most common class in k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables.", "k, using Laplace smoothing. Arguments: X {ndarray} -- Features. y {ndarray} -- Class", "returned by error_func. \"\"\" y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values) def", "variables. X_train {DataFrame} -- Independent training variables. 
y_train {DataFrame} -- Dependent training variables.", "order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum of incorrectly assinged", "= np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial", "data. Keyword Arguments: n_folds {int} -- Number of folds to use for validation.", "data. test {DataTuple} -- Test data. Returns: ndarray -- Predicted values. \"\"\" max_category", "Fnc) / (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using", "(default: {FOLDS}) max_k {int} -- Maximum value for k. (default: {MAX_K}) Returns: int", "errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i)", "y = train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty", "labels. n_classes {int} -- Number of classes. Returns: ndarray -- Logs of feature", "c) / y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of", "Returns: float -- Error value returned by error_func. \"\"\" y_pred = method(train, test,", "{DataTuple} -- Test data. Keyword Arguments: n_classes {int} -- Number of classes. (default:", "class priors and feature likelihoods. Arguments: X {ndarray} -- Independent variables. y {ndarray}", "np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive", "n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted variables.", "in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification", "Predicted values. 
\"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred def", "-- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values. \"\"\" train_X", "keyword arguments are passed to method. Returns: float -- Error value returned by", "classifier. Assigns class labels based on the most common class in k-nearest neighbors.", "Returns: ndarray -- Logs of feature likelihoods. \"\"\" n_features = X.shape[1] p_ij =", "{DataFrame} -- Independent training variables. y_train {DataFrame} -- Dependent training variables. k {int}", "Arguments: class_sums {list} -- Class frequencies. Returns: int -- Assinged class label. \"\"\"", "{DataTuple} -- Training data. test {DataTuple} -- Test data. Returns: ndarray -- Predicted", "by error_func. \"\"\" y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train,", "in k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame} -- Independent", "n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values.", "k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding", "Training data. test {DataTuple} -- Test data. All extra keyword arguments are passed", "train {DataTuple} -- Training data. test {DataTuple} -- Test data. Keyword Arguments: n_classes", "y {ndarray} -- Class labels. n_classes {int} -- Number of classes. Returns: ndarray", ":] neighbors, _ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels", "max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines the most common class in input.", "the logaritm of the probability of belonging to each class. Arguments: y {ndarray}", "classification_error(y_pred, y_true): \"\"\"Return classification error. 
Sum of incorrectly assinged classes divided by the", "test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple}", "== i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums):", "mnb_classifier_fit(train_X, train_y, n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y,", "posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1) return y_pred def k_nn_classifier(train, test,", "in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred =", "max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y):", "Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame} -- Independent training variables. y_train", "of k. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns:", "range(N): point = X[i, :] neighbors, _ = get_k_nn(point, X_train, k) train_labels =", "classes. Returns: ndarray -- Logs of feature likelihoods. \"\"\" n_features = X.shape[1] p_ij", "-- Logs of feature likelihoods. \"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features))", "Logs of feature likelihoods. \"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for", "if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum", "n_classes) y_pred = mnb_classifier_predict(test_X, class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit", "i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:,", "Number of classes. 
Returns: ndarray -- Class priors. ndarray -- Feature likelihoods. \"\"\"", "return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the probability of feature j, given", "labels. n_classes {int} -- Number of class labels. Returns: ndarray -- Log of", "return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates class priors and", "\"\"\"'Fit' K-nearest neighbors classifier by finding optimal value for k using cross validation.", "\"\"\" y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum", "max_classifier(train, test): \"\"\"Maximum classifier. Classifies using the most common class in training data.", "np.infty best_k = 1 for k in range(1, max_k): errors = np.zeros(n_folds) for", "* error) mean_error = np.sum(errors) / N if mean_error < min_error: min_error =", "def classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification for data and return error.", "n_classes): \"\"\"Calculates the probability of feature j, given class k, using Laplace smoothing.", "neighbors classifier. Assigns class labels based on the most common class in k-nearest", "[np.sum(train_labels == i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def", "the number of points. Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray} --", "k. (default: {MAX_K}) Returns: int -- Optimal value for k. float -- Error", "Sum of incorrectly assinged classes divided by the number of points. Arguments: y_pred", "best_k = 1 for k in range(1, max_k): errors = np.zeros(n_folds) for i", "best_k = k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify", "y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding optimal value", "X {DataFrame} -- Indendent variables. 
y {DataFrame} -- Dependent variable. Returns: int --", "np.argsort(class_sums)[::-1] candidates = [x for x in order if x == order[0]] return", "\"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum", "Arguments: n_folds {int} -- Number of folds to use for validation. (default: {FOLDS})", "ndarray -- Predicted values. \"\"\" train_X = train.X.values train_y = train.y.values test_X =", "= np.log((1.0 + Fnc) / (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors,", "variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray", "methods.\"\"\" import numpy as np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from", "= get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i) for", "= X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y", "== X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1)) for i in range(N):", "data. Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. Returns:", "error. Sum of incorrectly assinged classes divided by the number of points. Arguments:", "ndarray -- Predicted values. \"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return", "-- Predicted values. \"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category return y_pred", "according the most common class. If there are multiple candidates, pick one randomly.", "test): \"\"\"Maximum classifier. Classifies using the most common class in training data. Arguments:", "/ (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB", "Predicted values. y_true {ndarray} -- True values. Returns: float -- Error. \"\"\" y_true", "{ndarray} -- Class labels. n_classes {int} -- Number of classes. 
Returns: ndarray --", "for k. Returns: ndarray -- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y,", "-- Optimal value for k. float -- Error for selected k. \"\"\" #", "x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum of", "{ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray -- Predicted", "mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of", "max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. Arguments: X {DataFrame} -- Independent variables. max_category", "\"\"\" order = np.argsort(class_sums)[::-1] candidates = [x for x in order if x", "for k in range(1, max_k): errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds", "label. \"\"\" order = np.argsort(class_sums)[::-1] candidates = [x for x in order if", "to. Returns: ndarray -- Predicted values. \"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) *", "data. test {DataTuple} -- Test data. All extra keyword arguments are passed to", "test {DataTuple} -- Test data. Keyword Arguments: n_classes {int} -- Number of classes.", ":]) for c in range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c] y_pred", "= tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k)", "y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit'", "{int} -- Number of classes. Returns: ndarray -- Class priors. ndarray -- Feature", "classify to. Returns: ndarray -- Predicted values. \"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int)", "method. Returns: float -- Error value returned by error_func. 
\"\"\" y_pred = method(train,", "p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X {ndarray} --", "\"\"\" priors = np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y == c)", "feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c]", "train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i) for i in range(n_classes)] y_pred[i]", "numpy as np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import", "max_category): \"\"\"Classify using max classifier. Arguments: X {DataFrame} -- Independent variables. max_category {int}", "+ Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments:", "= np.zeros((N, 1)) for i in range(N): point = X[i, :] neighbors, _", "number of points. Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray} -- True", "n_folds) min_error = np.infty best_k = 1 for k in range(1, max_k): errors", "= k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] =", "min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier. Assigns", "max_classifier_fit(X, y): \"\"\"Determines the most common class in input. Arguments: X {DataFrame} --", "Error function. train {DataTuple} -- Training data. test {DataTuple} -- Test data. All", "class in training data. Arguments: train {DataTuple} -- Training data. test {DataTuple} --", "to use for validation. (default: {FOLDS}) max_k {int} -- Maximum value for k.", "-- Independent variables. max_category {int} -- Class to classify to. Returns: ndarray --", "Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values. 
\"\"\" train_X =", "Predicted values. \"\"\" n_classes = class_priors.size N = X.shape[0] posterior = np.zeros((N, n_classes))", "\"\"\"Determines the most common class in input. Arguments: X {DataFrame} -- Indendent variables.", "of the probability of belonging to each class. Arguments: y {ndarray} -- Class", "data. test {DataTuple} -- Test data. k {int} -- Value for k. Returns:", "at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data.", "= y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max", "y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the most common class. If there are", "using max classifier. Arguments: X {DataFrame} -- Independent variables. max_category {int} -- Class", "np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size) return", "likelihoods. Arguments: X {ndarray} -- Independent variables. y {ndarray} -- Dependent variables. n_classes", "k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error = np.sum(errors)", "int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k, n_classes=N_CLASSES): \"\"\"Classify using K-nearest neighbors classifier.", "values. y_true {ndarray} -- True values. Returns: float -- Error. \"\"\" y_true =", "data. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray", "return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum of incorrectly assinged classes", "n_classes)) for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in", "float -- Error. 
\"\"\" y_true = y_true.reshape(y_pred.shape) return np.sum(y_pred.astype(np.int) != y_true.astype(np.int)) / float(y_pred.size)", "class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier. Arguments: X {ndarray} -- Independent variables. class_priors", "of points. Arguments: y_pred {ndarray} -- Predicted values. y_true {ndarray} -- True values.", "= np.sum(errors) / N if mean_error < min_error: min_error = mean_error best_k =", "{function} -- Error function. train {DataTuple} -- Training data. test {DataTuple} -- Test", "Optimal value for k. float -- Error for selected k. \"\"\" # TODO:", "{DataTuple} -- Training data. test {DataTuple} -- Test data. All extra keyword arguments", "to each class. Arguments: y {ndarray} -- Class labels. n_classes {int} -- Number", "folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1 for k in", "= mnb_feature_likelihoods(X, y, n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm", "return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies using the most common", "order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def classification_error(y_pred, y_true): \"\"\"Return classification error.", "Arguments: train {DataTuple} -- Training data. Keyword Arguments: n_folds {int} -- Number of", "Class labels. n_classes {int} -- Number of classes. Returns: ndarray -- Logs of", "{DataFrame} -- Independent variables. max_category {int} -- Class to classify to. Returns: ndarray", "import k_fold_split_indexes, get_k_nn def classification(method, error_func, train, test, **kwargs): \"\"\"Perform classification for data", "def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. 
See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments:", "y_pred = np.zeros((N, 1)) for i in range(N): point = X[i, :] neighbors,", "-- Predicted values. \"\"\" n_classes = class_priors.size N = X.shape[0] posterior = np.zeros((N,", "-- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def", "= X.shape[0] y_pred = np.zeros((N, 1)) for i in range(N): point = X[i,", "method. error_func {function} -- Error function. train {DataTuple} -- Training data. test {DataTuple}", "n_classes) return class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the probability", "there are multiple candidates, pick one randomly. Arguments: class_sums {list} -- Class frequencies.", "class labels based on the most common class in k-nearest neighbors. Arguments: X", "data. test {DataTuple} -- Test data. Keyword Arguments: n_classes {int} -- Number of", "== c, j]) p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum))", "class. If there are multiple candidates, pick one randomly. Arguments: class_sums {list} --", "common class. If there are multiple candidates, pick one randomly. Arguments: class_sums {list}", "class_sums {list} -- Class frequencies. Returns: int -- Assinged class label. \"\"\" order", "axis=1) return y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple}", "\"\"\" train_X = train.X.values train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods =", "bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test", "train, test, **kwargs): \"\"\"Perform classification for data and return error. 
Arguments: method {function}", "c, :]) for j in range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c,", "* max_category return y_pred def multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See", "c in range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior,", "Predicted values. \"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred", "for k. (default: {MAX_K}) Returns: int -- Optimal value for k. float --", "belonging to each class. Arguments: y {ndarray} -- Class labels. n_classes {int} --", "= X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k = 1 for", "Class to classify to. Returns: ndarray -- Predicted values. \"\"\" y_pred = np.ones((X.shape[0],", "X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y ==", "range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size) return priors def mnb_feature_likelihoods(X, y,", "X.shape[1] == X_train.shape[1] N = X.shape[0] y_pred = np.zeros((N, 1)) for i in", "Arguments: X {DataFrame} -- Independent variables. max_category {int} -- Class to classify to.", "max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines the most common class in", "max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. Arguments: X {DataFrame} -- Independent", "Feature likelihoods. Returns: ndarray -- Predicted values. \"\"\" n_classes = class_priors.size N =", "-- Error value returned by error_func. \"\"\" y_pred = method(train, test, **kwargs) return", "class_priors, feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates class", "y, n_classes): \"\"\"Fit MNB classifier. Calculates class priors and feature likelihoods. 
Arguments: X", "= X.shape[0] posterior = np.zeros((N, n_classes)) for i in range(N): posterior[i, :] =", "AttributeError: pass try: X_train = X_train.values except AttributeError: pass try: y_train = y_train.values", "{DataTuple} -- Test data. All extra keyword arguments are passed to method. Returns:", "in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes): posterior[:, c]", "likelihoods. \"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return class_priors,", "float -- Error value returned by error_func. \"\"\" y_pred = method(train, test, **kwargs)", "prior probabilities. \"\"\" priors = np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y", "n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding optimal value for k using", "c, j]) p_ij[c, j] = np.log((1.0 + Fnc) / (n_features + Fc_sum)) return", "Value for k. Returns: ndarray -- Predicted values. \"\"\" y_pred = k_nn_classifier_predict(test.X, train.X,", "K-nearest neighbors classifier by finding optimal value for k using cross validation. Arguments:", "for j in range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c, j] =", "classifier by finding optimal value for k using cross validation. 
Arguments: train {DataTuple}", "in range(n_classes): posterior[:, c] = posterior[:, c] + class_priors[c] y_pred = np.argmax(posterior, axis=1)", "range(1, max_k): errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:] valid_ix", "in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the", "= X_train.values except AttributeError: pass try: y_train = y_train.values except AttributeError: pass assert", "AttributeError: pass try: y_train = y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1]", "np.log(np.sum(y == c) / y.size) return priors def mnb_feature_likelihoods(X, y, n_classes): \"\"\"Calculates the", "(default: {N_CLASSES}) Returns: ndarray -- Predicted variables. \"\"\" try: X = X.values except", "optimal value for k using cross validation. Arguments: train {DataTuple} -- Training data.", "feature_likelihoods) return y_pred def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates class priors", "Feature likelihoods. \"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods = mnb_feature_likelihoods(X, y, n_classes) return", "max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines the", "for k. float -- Error for selected k. \"\"\" # TODO: combine with", "variables. k {int} -- Value of k. Keyword Arguments: n_classes {int} -- Number", "value for k using cross validation. Arguments: train {DataTuple} -- Training data. Keyword", "np.zeros((n_classes, n_features)) for c in range(n_classes): Fc_sum = np.sum(X[y == c, :]) for", "Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. 
k {int}", "j in range(n_features): Fnc = np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0", "= train.y.values N = X.shape[0] folds = k_fold_split_indexes(N, n_folds) min_error = np.infty best_k", "class_sums = [np.sum(train_labels == i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return", "Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. Returns: ndarray", "mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates class priors and feature likelihoods. Arguments:", "feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the probability of belonging to", "variables. y_train {DataFrame} -- Dependent training variables. k {int} -- Value of k.", "\"\"\"Fit MNB classifier. Calculates class priors and feature likelihoods. Arguments: X {ndarray} --", "train.X.values train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes)", "Classification method. error_func {function} -- Error function. train {DataTuple} -- Training data. test", "Class frequencies. Returns: int -- Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1] candidates", "in range(n_classes): priors[c] = np.log(np.sum(y == c) / y.size) return priors def mnb_feature_likelihoods(X,", "X[i, :] neighbors, _ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums =", "y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines the most common", "X = X.values except AttributeError: pass try: X_train = X_train.values except AttributeError: pass", "y_pred def k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple} -- Training", "Returns: ndarray -- Class priors. ndarray -- Feature likelihoods. \"\"\" class_priors = mnb_class_priors(y,", "classifier. Classifies using the most common class in training data. 
Arguments: train {DataTuple}", "probability of belonging to each class. Arguments: y {ndarray} -- Class labels. n_classes", "-- Test data. k {int} -- Value for k. Returns: ndarray -- Predicted", "1 for k in range(1, max_k): errors = np.zeros(n_folds) for i in range(n_folds):", "for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for c in range(n_classes):", "y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the most common", "X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels == i) for i in", "y {DataFrame} -- Dependent variable. Returns: int -- Most common class. \"\"\" y", "likelihoods. \"\"\" n_features = X.shape[1] p_ij = np.zeros((n_classes, n_features)) for c in range(n_classes):", "classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test {DataTuple}", "of belonging to each class. Arguments: y {ndarray} -- Class labels. n_classes {int}", "class_priors, feature_likelihoods def mnb_class_priors(y, n_classes): \"\"\"Calculates the logaritm of the probability of belonging", "k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding optimal value for k", "of prior probabilities. \"\"\" priors = np.zeros(n_classes) for c in range(n_classes): priors[c] =", "+ Fnc) / (n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify", "= (valid_ix.size * error) mean_error = np.sum(errors) / N if mean_error < min_error:", "given class k, using Laplace smoothing. Arguments: X {ndarray} -- Features. y {ndarray}", "Arguments: y {ndarray} -- Class labels. n_classes {int} -- Number of class labels.", "k_nn_classifier(train, test, k): \"\"\"K-nearest neighbors classifier. Arguments: train {DataTuple} -- Training data. test", "ndarray -- Log of prior probabilities. 
\"\"\" priors = np.zeros(n_classes) for c in", "\"\"\" y_pred = k_nn_classifier_predict(test.X, train.X, train.y, k) return y_pred def k_nn_classifier_fit(train, n_folds=FOLDS, max_k=MAX_K):", "priors = np.zeros(n_classes) for c in range(n_classes): priors[c] = np.log(np.sum(y == c) /", "i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according", "= method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies", "classifier. Arguments: X {DataFrame} -- Independent variables. max_category {int} -- Class to classify", "Training data. test {DataTuple} -- Test data. Keyword Arguments: n_classes {int} -- Number", "likelihoods. Returns: ndarray -- Predicted values. \"\"\" n_classes = class_priors.size N = X.shape[0]", "Independent training variables. y_train {DataFrame} -- Dependent training variables. k {int} -- Value", "= np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error = classification_error(y_pred,", "for data and return error. Arguments: method {function} -- Classification method. error_func {function}", "arguments are passed to method. Returns: float -- Error value returned by error_func.", "**kwargs): \"\"\"Perform classification for data and return error. Arguments: method {function} -- Classification", "np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using max classifier. Arguments: X {DataFrame}", "def max_classifier(train, test): \"\"\"Maximum classifier. 
Classifies using the most common class in training", "\"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X,", "classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error = np.sum(errors) / N if", "{ndarray} -- Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature", "-- Test data. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES})", "(n_features + Fc_sum)) return p_ij def mnb_classifier_predict(X, class_priors, feature_likelihoods): \"\"\"Classify using MNB classifier.", "= X.values except AttributeError: pass try: X_train = X_train.values except AttributeError: pass try:", "-- Classification method. error_func {function} -- Error function. train {DataTuple} -- Training data.", "class_priors {ndarray} -- Class priors. feature_likelihoods {ndarray} -- Feature likelihoods. Returns: ndarray --", "= np.argsort(class_sums)[::-1] candidates = [x for x in order if x == order[0]]", "k {int} -- Value for k. Returns: ndarray -- Predicted values. \"\"\" y_pred", "= k_nn_assign_label(class_sums) return y_pred def k_nn_assign_label(class_sums): \"\"\"Assing label according the most common class.", "y_true): \"\"\"Return classification error. Sum of incorrectly assinged classes divided by the number", "most common class in input. Arguments: X {DataFrame} -- Indendent variables. y {DataFrame}", "\"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify", "-- Class to classify to. Returns: ndarray -- Predicted values. \"\"\" y_pred =", "n_classes {int} -- Number of classes. Returns: ndarray -- Class priors. 
ndarray --", "= mean_error best_k = k return int(best_k), min_error def k_nn_classifier_predict(X, X_train, y_train, k,", "See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train {DataTuple} -- Training data. test {DataTuple} --", "= [np.sum(train_labels == i) for i in range(n_classes)] y_pred[i] = k_nn_assign_label(class_sums) return y_pred", "k {int} -- Value of k. Keyword Arguments: n_classes {int} -- Number of", "neighbors, _ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors] class_sums = [np.sum(train_labels ==", "= train.X.values train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y,", "to classify to. Returns: ndarray -- Predicted values. \"\"\" y_pred = np.ones((X.shape[0], 1),", "-- Number of folds to use for validation. (default: {FOLDS}) max_k {int} --", "Class priors. ndarray -- Feature likelihoods. \"\"\" class_priors = mnb_class_priors(y, n_classes) feature_likelihoods =", "N if mean_error < min_error: min_error = mean_error best_k = k return int(best_k),", "y = y.values max_category = np.bincount(y.astype(int)).argmax() return max_category def max_classifier_predict(X, max_category): \"\"\"Classify using", "max_k=MAX_K): \"\"\"'Fit' K-nearest neighbors classifier by finding optimal value for k using cross", "passed to method. Returns: float -- Error value returned by error_func. \"\"\" y_pred", "-- Test data. Returns: ndarray -- Predicted values. \"\"\" max_category = max_classifier_fit(train.X, train.y)", "k in range(1, max_k): errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds =", "X_train.values except AttributeError: pass try: y_train = y_train.values except AttributeError: pass assert X.shape[1]", "Fnc = np.sum(X[y == c, j]) p_ij[c, j] = np.log((1.0 + Fnc) /", "are multiple candidates, pick one randomly. Arguments: class_sums {list} -- Class frequencies. Returns:", "-- Error function. 
train {DataTuple} -- Training data. test {DataTuple} -- Test data.", "train_y = train.y.values test_X = test.X.values class_priors, feature_likelihoods = mnb_classifier_fit(train_X, train_y, n_classes) y_pred", "method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test): \"\"\"Maximum classifier. Classifies using", "point = X[i, :] neighbors, _ = get_k_nn(point, X_train, k) train_labels = y_train[neighbors]", "= np.infty best_k = 1 for k in range(1, max_k): errors = np.zeros(n_folds)", "k. float -- Error for selected k. \"\"\" # TODO: combine with k_nn_regression_fit()?", "the most common class. If there are multiple candidates, pick one randomly. Arguments:", "int -- Optimal value for k. float -- Error for selected k. \"\"\"", "Number of class labels. Returns: ndarray -- Log of prior probabilities. \"\"\" priors", "X {ndarray} -- Features. y {ndarray} -- Class labels. n_classes {int} -- Number", "order = np.argsort(class_sums)[::-1] candidates = [x for x in order if x ==", "Test data. k {int} -- Value for k. Returns: ndarray -- Predicted values.", "y[train_ix], k) error = classification_error(y_pred, y[valid_ix]) errors[i] = (valid_ix.size * error) mean_error =", "ndarray -- Class priors. ndarray -- Feature likelihoods. \"\"\" class_priors = mnb_class_priors(y, n_classes)", "smoothing. Arguments: X {ndarray} -- Features. y {ndarray} -- Class labels. n_classes {int}", "classifier. Arguments: X {ndarray} -- Independent variables. class_priors {ndarray} -- Class priors. feature_likelihoods", "mean_error < min_error: min_error = mean_error best_k = k return int(best_k), min_error def", "folds to use for validation. (default: {FOLDS}) max_k {int} -- Maximum value for", "for i in range(n_folds): tmp_folds = folds[:] valid_ix = tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds)", "max classifier. Arguments: X {DataFrame} -- Independent variables. 
max_category {int} -- Class to", "= np.zeros((N, n_classes)) for i in range(N): posterior[i, :] = feature_likelihoods.dot(X[i, :]) for", "multinomial_naive_bayes_classifier(train, test, n_classes=N_CLASSES): \"\"\"Multinomial naive bayes classifier. See more at: https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Multinomial_naive_Bayes Arguments: train", "X.values except AttributeError: pass try: X_train = X_train.values except AttributeError: pass try: y_train", "Returns: int -- Optimal value for k. float -- Error for selected k.", "Test data. All extra keyword arguments are passed to method. Returns: float --", "of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values. \"\"\" train_X = train.X.values", "Returns: int -- Most common class. \"\"\" y = y.values max_category = np.bincount(y.astype(int)).argmax()", "frequencies. Returns: int -- Assinged class label. \"\"\" order = np.argsort(class_sums)[::-1] candidates =", "= max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def max_classifier_fit(X, y): \"\"\"Determines", "def classification_error(y_pred, y_true): \"\"\"Return classification error. Sum of incorrectly assinged classes divided by", "< min_error: min_error = mean_error best_k = k return int(best_k), min_error def k_nn_classifier_predict(X,", "common class in k-nearest neighbors. Arguments: X {DataFrame} -- Independent variables. X_train {DataFrame}", "try: y_train = y_train.values except AttributeError: pass assert X.shape[1] == X_train.shape[1] N =", "Laplace smoothing. Arguments: X {ndarray} -- Features. y {ndarray} -- Class labels. 
n_classes", "1)) for i in range(N): point = X[i, :] neighbors, _ = get_k_nn(point,", "tmp_folds.pop(i) train_ix = np.concatenate(tmp_folds) y_pred = k_nn_classifier_predict(X[valid_ix, :], X[train_ix, :], y[train_ix], k) error", "pass try: X_train = X_train.values except AttributeError: pass try: y_train = y_train.values except", "Arguments: train {DataTuple} -- Training data. test {DataTuple} -- Test data. Keyword Arguments:", "as np from machine_learning.constants import N_CLASSES, FOLDS, MAX_K, RANDOM_SEED from machine_learning.utilities import k_fold_split_indexes,", "method {function} -- Classification method. error_func {function} -- Error function. train {DataTuple} --", "and return error. Arguments: method {function} -- Classification method. error_func {function} -- Error", "values. \"\"\" max_category = max_classifier_fit(train.X, train.y) y_pred = max_classifier_predict(test.X, max_category) return y_pred def", "def mnb_classifier_fit(X, y, n_classes): \"\"\"Fit MNB classifier. Calculates class priors and feature likelihoods.", "variables. n_classes {int} -- Number of classes. Returns: ndarray -- Class priors. ndarray", "{DataFrame} -- Independent variables. X_train {DataFrame} -- Independent training variables. y_train {DataFrame} --", "randomly. Arguments: class_sums {list} -- Class frequencies. Returns: int -- Assinged class label.", "= [x for x in order if x == order[0]] return np.random.RandomState(RANDOM_SEED).choice(candidates) def", "error_func. \"\"\" y_pred = method(train, test, **kwargs) return error_func(y_pred, test.y.values) def max_classifier(train, test):", "function. train {DataTuple} -- Training data. test {DataTuple} -- Test data. All extra", "of classes. Returns: ndarray -- Class priors. ndarray -- Feature likelihoods. \"\"\" class_priors", "Arguments: method {function} -- Classification method. error_func {function} -- Error function. 
train {DataTuple}", "for c in range(n_classes): Fc_sum = np.sum(X[y == c, :]) for j in", "k_nn_assign_label(class_sums): \"\"\"Assing label according the most common class. If there are multiple candidates,", "-- Predicted variables. \"\"\" try: X = X.values except AttributeError: pass try: X_train", "Log of prior probabilities. \"\"\" priors = np.zeros(n_classes) for c in range(n_classes): priors[c]", "Value of k. Keyword Arguments: n_classes {int} -- Number of classes. (default: {N_CLASSES})", "Returns: ndarray -- Predicted values. \"\"\" y_pred = np.ones((X.shape[0], 1), dtype=np.int) * max_category", "{int} -- Number of classes. (default: {N_CLASSES}) Returns: ndarray -- Predicted values. \"\"\"", "try: X_train = X_train.values except AttributeError: pass try: y_train = y_train.values except AttributeError:", "max_k): errors = np.zeros(n_folds) for i in range(n_folds): tmp_folds = folds[:] valid_ix =", "-- Class labels. n_classes {int} -- Number of class labels. Returns: ndarray --", "of classes. Returns: ndarray -- Logs of feature likelihoods. \"\"\" n_features = X.shape[1]" ]
[ "is # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version", "to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from", "key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls,", "http://aws.amazon.com/apache2.0/ # # or in the \"license\" file accompanying this file. This file", "is located at # # http://aws.amazon.com/apache2.0/ # # or in the \"license\" file", "License, Version 2.0 (the \"License\"). You # may not use this file except", "pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from", "You # may not use this file except in compliance with the License.", "EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import", "This file is # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "ANY KIND, either express or implied. See the License for the specific #", "kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent():", "not use this file except in compliance with the License. A copy of", "and limitations under the License. 
\"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest", "WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig", "# may not use this file except in compliance with the License. A", "wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\",", "RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey(", ") VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or", "def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs)", "See the License for the specific # language governing permissions and limitations under", "def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs): test =", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"). 
You", "[pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS =", "dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ]", "the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the", "with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls,", "# # http://aws.amazon.com/apache2.0/ # # or in the \"license\" file accompanying this file.", "validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys", "@pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig,", "Version 2.0 (the \"License\"). You # may not use this file except in", "the License. \"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from", "issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs): test = cls(**kwargs) assert isinstance(test.provider_id,", "License. 
A copy of # the License is located at # # http://aws.amazon.com/apache2.0/", "all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\",", "import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark =", "] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def", "the License. A copy of # the License is located at # #", "dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls,", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY", "in compliance with the License. A copy of # the License is located", "from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local]", "@pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs):", "Licensed under the Apache License, Version 2.0 (the \"License\"). You # may not", "provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\",", "License. 
\"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers", "2.0 (the \"License\"). You # may not use this file except in compliance", "License for the specific # language governing permissions and limitations under the License.", "import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base", "import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY =", "from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig", "= { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\",", "kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with", ".unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a", "the Apache License, Version 2.0 (the \"License\"). You # may not use this", "# or in the \"license\" file accompanying this file. 
This file is #", "WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a", "def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\",", "= WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [", "kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def", "file accompanying this file. This file is # distributed on an \"AS IS\"", "aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY", "<filename>test/unit/test_providers_raw_master_key_config.py<gh_stars>10-100 # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. #", "KIND, either express or implied. See the License for the specific # language", "aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from", "aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils", "# # Licensed under the Apache License, Version 2.0 (the \"License\"). 
You #", "under the Apache License, Version 2.0 (the \"License\"). You # may not use", "symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a", "under the License. \"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six", "this file except in compliance with the License. A copy of # the", "six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import", "MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit,", "file except in compliance with the License. A copy of # the License", "{ RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a", "permissions and limitations under the License. \"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import", "located at # # http://aws.amazon.com/apache2.0/ # # or in the \"license\" file accompanying", "WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs,", "wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def", "in the \"license\" file accompanying this file. 
This file is # distributed on", "suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm", "test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def", "except in compliance with the License. A copy of # the License is", "kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig)", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\").", "# Licensed under the Apache License, Version 2.0 (the \"License\"). You # may", "# ANY KIND, either express or implied. See the License for the specific", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "\"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers import", "aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark", "pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, )", "= [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS", "or its affiliates. 
All Rights Reserved. # # Licensed under the Apache License,", "cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert", "\"License\"). You # may not use this file except in compliance with the", "this file. This file is # distributed on an \"AS IS\" BASIS, WITHOUT", "RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\",", "all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError):", "express or implied. See the License for the specific # language governing permissions", "all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls,", "Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed", "pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = {", "assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs): test = cls(**kwargs) assert", "test_attributes_invalid_kwargs(cls, kwargs): with pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS))", "with the License. 
A copy of # the License is located at #", "cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs): test", "Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express", "provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS))", "WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the", "affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "the \"license\" file accompanying this file. This file is # distributed on an", "from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING,", "} @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS)) def test_attributes_invalid_kwargs(cls,", "test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs): test = cls(**kwargs)", "# # or in the \"license\" file accompanying this file. This file is", "may not use this file except in compliance with the License. 
A copy", "provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls,", "at # # http://aws.amazon.com/apache2.0/ # # or in the \"license\" file accompanying this", "pytest.raises(TypeError): cls(**kwargs) def test_parent(): assert issubclass(RawMasterKeyConfig, MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs):", "import pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey", "either express or implied. See the License for the specific # language governing", "# http://aws.amazon.com/apache2.0/ # # or in the \"license\" file accompanying this file. This", "# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF #", "wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw", "the License for the specific # language governing permissions and limitations under the", "test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType,", "(the \"License\"). You # may not use this file except in compliance with", "key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs)", "# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# #", "VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw", "compliance with the License. A copy of # the License is located at", "WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See", "raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs):", "OF # ANY KIND, either express or implied. See the License for the", "import all_invalid_kwargs, all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric", "of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either", "all_valid_kwargs pytestmark = [pytest.mark.unit, pytest.mark.local] STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC,", "or in the \"license\" file accompanying this file. This file is # distributed", "STATIC_WRAPPING_KEY = WrappingKey( wrapping_algorithm=WrappingAlgorithm.AES_256_GCM_IV12_TAG16_NO_PADDING, wrapping_key=b\"_________a symmetric key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig:", "wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\", all_invalid_kwargs(VALID_KWARGS))", "use this file except in compliance with the License. A copy of #", "CONDITIONS OF # ANY KIND, either express or implied. 
See the License for", "import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import", "\"license\" file accompanying this file. This file is # distributed on an \"AS", "[ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY),", "copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # #", "2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under", "# the License is located at # # http://aws.amazon.com/apache2.0/ # # or in", "Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the", "language governing permissions and limitations under the License. \"\"\"Unit test suite to validate", "for the specific # language governing permissions and limitations under the License. \"\"\"Unit", "# language governing permissions and limitations under the License. \"\"\"Unit test suite to", "file. This file is # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import six from aws_encryption_sdk.identifiers import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import", "limitations under the License. 
\"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\" import pytest import", "from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from .unit_test_utils import all_invalid_kwargs, all_valid_kwargs", "file is # distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY),", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied.", "License is located at # # http://aws.amazon.com/apache2.0/ # # or in the \"license\"", "specific # language governing permissions and limitations under the License. \"\"\"Unit test suite", "or implied. See the License for the specific # language governing permissions and", "A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ #", "implied. See the License for the specific # language governing permissions and limitations", "the specific # language governing permissions and limitations under the License. \"\"\"Unit test", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND,", "governing permissions and limitations under the License. \"\"\"Unit test suite to validate aws_encryption_sdk.key_providers.raw.RawMasterKeyConfig\"\"\"", "raw key\", provider_id=\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), dict(key_id=b\"a raw key\", provider_id=b\"a provider\", wrapping_key=STATIC_WRAPPING_KEY), ] }", "MasterKeyConfig) @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_converts(cls, kwargs): test = cls(**kwargs) assert isinstance(test.provider_id, six.string_types)", "Apache License, Version 2.0 (the \"License\"). 
You # may not use this file", "key________\", wrapping_key_type=EncryptionKeyType.SYMMETRIC, ) VALID_KWARGS = { RawMasterKeyConfig: [ dict(key_id=b\"a raw key\", provider_id=\"a provider\",", "provider\", wrapping_key=STATIC_WRAPPING_KEY), ] } @pytest.mark.parametrize(\"cls, kwargs\", all_valid_kwargs(VALID_KWARGS)) def test_attributes_valid_kwargs(cls, kwargs): cls(**kwargs) @pytest.mark.parametrize(\"cls, kwargs\",", "OR CONDITIONS OF # ANY KIND, either express or implied. See the License", "accompanying this file. This file is # distributed on an \"AS IS\" BASIS,", "import EncryptionKeyType, WrappingAlgorithm from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw", "from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey from aws_encryption_sdk.key_providers.base import MasterKeyConfig from aws_encryption_sdk.key_providers.raw import RawMasterKeyConfig from" ]
[ "output_field = FloatField() class XMin(Func): \"\"\"Returns the X minima of a 2D or", "3D bounding box or a geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField() class", "2D or 3D bounding box or a geometry.\"\"\" function = \"ST_YMax\" output_field =", "\"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns the Z minima of a 2D", "the pixel type of the first band of a raster.\"\"\" function = \"ST_MakeLine\"", "2D or 3D bounding box or a geometry.\"\"\" function = \"ST_XMax\" output_field =", "Django's own coverage of Postgres and PostGIS functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import", "= FloatField() class XMin(Func): \"\"\"Returns the X minima of a 2D or 3D", "FloatField() class MakePoint(Func): \"\"\"Compute the pixel type of the first band of a", "LineStringField, PointField from django.db.models import FloatField, Func class Box3D(Func): \"\"\"Compute the 3D bounding", "import GeometryField, LineStringField, PointField from django.db.models import FloatField, Func class Box3D(Func): \"\"\"Compute the", "= FloatField() class YMin(Func): \"\"\"Returns the Y minima of a 2D or 3D", "= \"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns the X minima of a", "band of a raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute", "of a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_XMax\"", "output_field = GeometryField() class XMax(Func): \"\"\"Returns the X maxima of a 2D or", "the Y minima of a 2D or 3D bounding box or a geometry.\"\"\"", "= \"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns the Z minima of a", "function = \"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute the pixel type of", "or a geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns the", 
"bounding box or a geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField() class XMin(Func):", "functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField, PointField from django.db.models import FloatField,", "\"\"\"Django database functions. This module supplements Django's own coverage of Postgres and PostGIS", "a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_XMax\" output_field", "functions. This module supplements Django's own coverage of Postgres and PostGIS functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1", "YMax(Func): \"\"\"Returns the Y maxima of a 2D or 3D bounding box or", "= PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type of the first band of", "\"\"\"Returns the X minima of a 2D or 3D bounding box or a", "output_field = FloatField() class ZMax(Func): \"\"\"Returns the Z maxima of a 2D or", "= \"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute the pixel type of the", "3D bounding box or a geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField() class", "= FloatField() class ZMax(Func): \"\"\"Returns the Z maxima of a 2D or 3D", "function = \"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns the X maxima of", "box or a geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns", "pixel type of the first band of a raster.\"\"\" function = \"ST_MakePoint\" output_field", "PostGIS functions. 
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField, PointField from django.db.models import", "or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField()", "class YMax(Func): \"\"\"Returns the Y maxima of a 2D or 3D bounding box", "own coverage of Postgres and PostGIS functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField,", "class Box3D(Func): \"\"\"Compute the 3D bounding box of a geometry.\"\"\" function = \"Box3D\"", "= FloatField() class ZMin(Func): \"\"\"Returns the Z minima of a 2D or 3D", "= \"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns the X maxima of a", "bounding box of a geometry.\"\"\" function = \"Box3D\" output_field = GeometryField() class XMax(Func):", "class XMin(Func): \"\"\"Returns the X minima of a 2D or 3D bounding box", "MakePoint(Func): \"\"\"Compute the pixel type of the first band of a raster.\"\"\" function", "of a raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the", "box or a geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute", "or 3D bounding box or a geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField()", "class MakePoint(Func): \"\"\"Compute the pixel type of the first band of a raster.\"\"\"", "box or a geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns", "a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMax\" output_field", "bounding box or a geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField() class YMax(Func):", "https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, 
LineStringField, PointField from django.db.models import FloatField, Func", "of a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMin\"", "import FloatField, Func class Box3D(Func): \"\"\"Compute the 3D bounding box of a geometry.\"\"\"", "or 3D bounding box or a geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField()", "box or a geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns", "a geometry.\"\"\" function = \"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns the X", "bounding box or a geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField() class MakePoint(Func):", "or 3D bounding box or a geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField()", "raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type", "geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns the Y maxima", "MakeLine(Func): \"\"\"Compute the pixel type of the first band of a raster.\"\"\" function", "the Z maxima of a 2D or 3D bounding box or a geometry.\"\"\"", "from django.db.models import FloatField, Func class Box3D(Func): \"\"\"Compute the 3D bounding box of", "Func class Box3D(Func): \"\"\"Compute the 3D bounding box of a geometry.\"\"\" function =", "the 3D bounding box of a geometry.\"\"\" function = \"Box3D\" output_field = GeometryField()", "minima of a 2D or 3D bounding box or a geometry.\"\"\" function =", "\"\"\"Returns the Y maxima of a 2D or 3D bounding box or a", "of a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_YMin\"", "of a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMax\"", "\"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns the Y maxima of a 2D", "a geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns the Z", "= GeometryField() class XMax(Func): 
\"\"\"Returns the X maxima of a 2D or 3D", "function = \"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns the Y minima of", "the Y maxima of a 2D or 3D bounding box or a geometry.\"\"\"", "the X minima of a 2D or 3D bounding box or a geometry.\"\"\"", "class XMax(Func): \"\"\"Returns the X maxima of a 2D or 3D bounding box", "box or a geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns", "Y minima of a 2D or 3D bounding box or a geometry.\"\"\" function", "the pixel type of the first band of a raster.\"\"\" function = \"ST_MakePoint\"", "FloatField, Func class Box3D(Func): \"\"\"Compute the 3D bounding box of a geometry.\"\"\" function", "Z maxima of a 2D or 3D bounding box or a geometry.\"\"\" function", "Z minima of a 2D or 3D bounding box or a geometry.\"\"\" function", "= \"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns the Z maxima of a", "\"\"\"Returns the Y minima of a 2D or 3D bounding box or a", "of the first band of a raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0)", "geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns the Z maxima", "class YMin(Func): \"\"\"Returns the Y minima of a 2D or 3D bounding box", "\"\"\"Compute the 3D bounding box of a geometry.\"\"\" function = \"Box3D\" output_field =", "a geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns the Z", "pixel type of the first band of a raster.\"\"\" function = \"ST_MakeLine\" output_field", "coverage of Postgres and PostGIS functions. 
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField,", "type of the first band of a raster.\"\"\" function = \"ST_MakePoint\" output_field =", "geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns the X minima", "or 3D bounding box or a geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField()", "geometry.\"\"\" function = \"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns the X maxima", "\"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns the X maxima of a 2D", "PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type of the first band of a", "or a geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns the", "the Z minima of a 2D or 3D bounding box or a geometry.\"\"\"", "or a geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute the", "FloatField() class ZMin(Func): \"\"\"Returns the Z minima of a 2D or 3D bounding", "GeometryField() class XMax(Func): \"\"\"Returns the X maxima of a 2D or 3D bounding", "a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_YMax\" output_field", "of the first band of a raster.\"\"\" function = \"ST_MakeLine\" output_field = LineStringField(srid=0)", "3D bounding box or a geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField() class", "database functions. 
This module supplements Django's own coverage of Postgres and PostGIS functions.", "XMax(Func): \"\"\"Returns the X maxima of a 2D or 3D bounding box or", "class ZMin(Func): \"\"\"Returns the Z minima of a 2D or 3D bounding box", "output_field = FloatField() class ZMin(Func): \"\"\"Returns the Z minima of a 2D or", "\"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns the Y minima of a 2D", "a geometry.\"\"\" function = \"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns the X", "output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type of the first band", "PointField from django.db.models import FloatField, Func class Box3D(Func): \"\"\"Compute the 3D bounding box", "output_field = FloatField() class YMax(Func): \"\"\"Returns the Y maxima of a 2D or", "geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute the pixel type", "and PostGIS functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField, PointField from django.db.models", "the first band of a raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0) class", "bounding box or a geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField() class ZMax(Func):", "a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMin\" output_field", "from django.contrib.gis.db.models import GeometryField, LineStringField, PointField from django.db.models import FloatField, Func class Box3D(Func):", "of a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_YMax\"", "\"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type of the first", "a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_YMin\" output_field", "FloatField() class XMin(Func): \"\"\"Returns the X minima of a 2D or 3D bounding", "box of a 
geometry.\"\"\" function = \"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns", "first band of a raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func):", "function = \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type of", "function = \"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns the X minima of", "a geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns the Y", "type of the first band of a raster.\"\"\" function = \"ST_MakeLine\" output_field =", "\"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField, PointField from django.db.models import FloatField, Func class", "function = \"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns the Z maxima of", "GeometryField, LineStringField, PointField from django.db.models import FloatField, Func class Box3D(Func): \"\"\"Compute the 3D", "module supplements Django's own coverage of Postgres and PostGIS functions. 
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from", "X maxima of a 2D or 3D bounding box or a geometry.\"\"\" function", "3D bounding box or a geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField() class", "\"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute the pixel type of the first", "output_field = FloatField() class MakePoint(Func): \"\"\"Compute the pixel type of the first band", "bounding box or a geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField() class YMin(Func):", "2D or 3D bounding box or a geometry.\"\"\" function = \"ST_XMin\" output_field =", "function = \"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns the Y maxima of", "geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns the Z minima", "or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField()", "function = \"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns the Z minima of", "\"\"\"Returns the Z minima of a 2D or 3D bounding box or a", "ZMax(Func): \"\"\"Returns the Z maxima of a 2D or 3D bounding box or", "a geometry.\"\"\" function = \"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns the Y", "= \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel type of the", "Y maxima of a 2D or 3D bounding box or a geometry.\"\"\" function", "bounding box or a geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField() class ZMin(Func):", "2D or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMax\" output_field =", "a geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField() class MakePoint(Func): \"\"\"Compute the pixel", "ZMin(Func): \"\"\"Returns the Z minima of a 2D or 3D bounding box or", "django.contrib.gis.db.models import GeometryField, LineStringField, PointField from django.db.models import 
FloatField, Func class Box3D(Func): \"\"\"Compute", "of a geometry.\"\"\" function = \"Box3D\" output_field = GeometryField() class XMax(Func): \"\"\"Returns the", "geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns the Y minima", "output_field = FloatField() class YMin(Func): \"\"\"Returns the Y minima of a 2D or", "YMin(Func): \"\"\"Returns the Y minima of a 2D or 3D bounding box or", "= FloatField() class MakePoint(Func): \"\"\"Compute the pixel type of the first band of", "3D bounding box of a geometry.\"\"\" function = \"Box3D\" output_field = GeometryField() class", "the X maxima of a 2D or 3D bounding box or a geometry.\"\"\"", "\"\"\"Returns the Z maxima of a 2D or 3D bounding box or a", "class MakeLine(Func): \"\"\"Compute the pixel type of the first band of a raster.\"\"\"", "a raster.\"\"\" function = \"ST_MakePoint\" output_field = PointField(srid=0) class MakeLine(Func): \"\"\"Compute the pixel", "= FloatField() class YMax(Func): \"\"\"Returns the Y maxima of a 2D or 3D", "This module supplements Django's own coverage of Postgres and PostGIS functions. 
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\"", "2D or 3D bounding box or a geometry.\"\"\" function = \"ST_ZMin\" output_field =", "\"\"\"Returns the X maxima of a 2D or 3D bounding box or a", "= \"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns the Y minima of a", "FloatField() class YMin(Func): \"\"\"Returns the Y minima of a 2D or 3D bounding", "2D or 3D bounding box or a geometry.\"\"\" function = \"ST_YMin\" output_field =", "or a geometry.\"\"\" function = \"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns the", "3D bounding box or a geometry.\"\"\" function = \"ST_ZMin\" output_field = FloatField() class", "or a geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField() class ZMin(Func): \"\"\"Returns the", "or a geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns the", "\"ST_YMin\" output_field = FloatField() class ZMax(Func): \"\"\"Returns the Z maxima of a 2D", "XMin(Func): \"\"\"Returns the X minima of a 2D or 3D bounding box or", "maxima of a 2D or 3D bounding box or a geometry.\"\"\" function =", "\"\"\"Compute the pixel type of the first band of a raster.\"\"\" function =", "supplements Django's own coverage of Postgres and PostGIS functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models", "3D bounding box or a geometry.\"\"\" function = \"ST_ZMax\" output_field = FloatField() class", "\"ST_XMax\" output_field = FloatField() class XMin(Func): \"\"\"Returns the X minima of a 2D", "Postgres and PostGIS functions. 
https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField, PointField from", "Box3D(Func): \"\"\"Compute the 3D bounding box of a geometry.\"\"\" function = \"Box3D\" output_field", "a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_XMin\" output_field", "FloatField() class ZMax(Func): \"\"\"Returns the Z maxima of a 2D or 3D bounding", "X minima of a 2D or 3D bounding box or a geometry.\"\"\" function", "of Postgres and PostGIS functions. https://docs.djangoproject.com/en/4.0/ref/models/expressions/#func-expressions-1 \"\"\" from django.contrib.gis.db.models import GeometryField, LineStringField, PointField", "class ZMax(Func): \"\"\"Returns the Z maxima of a 2D or 3D bounding box", "of a 2D or 3D bounding box or a geometry.\"\"\" function = \"ST_XMin\"", "django.db.models import FloatField, Func class Box3D(Func): \"\"\"Compute the 3D bounding box of a", "FloatField() class YMax(Func): \"\"\"Returns the Y maxima of a 2D or 3D bounding", "box or a geometry.\"\"\" function = \"ST_YMax\" output_field = FloatField() class YMin(Func): \"\"\"Returns", "= \"ST_XMin\" output_field = FloatField() class YMax(Func): \"\"\"Returns the Y maxima of a" ]
[ "timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\",", "pandas as pd from . import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from", "array_like is given, the length must be 8784 * If a pandas DataFrame", "from . import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from a given timeseries", "23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29), (times.month == 2)) if len(timeseries.shape) ==", "as pd from . import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from a", "= np.logical_and((times.day == 29), (times.month == 2)) if len(timeseries.shape) == 1: return timeseries[~sel]", "pandas DataFrame or Series is given, time indexes will be used directly Returns", "DataFrame or Series is given, time indexes will be used directly Returns -------", "if timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000", "= np.logical_and((times.day == 29), (times.month == 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel] else:", "If a pandas DataFrame or Series is given, time indexes will be used", "pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day == 29), (times.month == 2)) if", "a pandas DataFrame or Series is given, time indexes will be used directly", "29), (times.month == 2)) if len(timeseries.shape) == 1: return timeseries[~sel] else: return timeseries[~sel,", "leap days from * If something array_like is given, the length must be", "---------- timeseries : array_like The time series data to remove leap days from", "2)) if len(timeseries.shape) == 1: return timeseries[~sel] else: return timeseries[~sel, :] else: raise", "days from * If something array_like is given, the length must be 8784", "pd from . 
import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from a given", "be used directly Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] ==", "directly Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return", "00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29), (times.month == 2)) if", "np.ndarray): if timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0] == 8784: times =", "import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from a given timeseries Parameters ----------", "remove_leap_day(timeseries): \"\"\"Removes leap days from a given timeseries Parameters ---------- timeseries : array_like", "from * If something array_like is given, the length must be 8784 *", "isinstance(timeseries, pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day == 29), (times.month == 2))", "else: raise ResError('Cannot handle array shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or", "== 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel] else: return timeseries.loc[~sel] else: return remove_leap_day(np.array(timeseries))", "days from a given timeseries Parameters ---------- timeseries : array_like The time series", "freq=\"H\") sel = np.logical_and((times.day == 29), (times.month == 2)) if len(timeseries.shape) == 1:", "== 2)) if len(timeseries.shape) == 1: return timeseries[~sel] else: return timeseries[~sel, :] else:", "times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29), (times.month", "given, time indexes will be used directly Returns ------- Array \"\"\" if isinstance(timeseries,", "Series is given, time indexes will be used directly Returns ------- Array \"\"\"", "shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, 
pd.DataFrame): times = timeseries.index", "timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day", "time indexes will be used directly Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray):", "or Series is given, time indexes will be used directly Returns ------- Array", "used directly Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760:", "== 1: return timeseries[~sel] else: return timeseries[~sel, :] else: raise ResError('Cannot handle array", ":] else: raise ResError('Cannot handle array shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series)", "8760: return timeseries elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\",", "sel = np.logical_and((times.day == 29), (times.month == 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel]", "from a given timeseries Parameters ---------- timeseries : array_like The time series data", "numpy as np import pandas as pd from . import ResError def remove_leap_day(timeseries):", "Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0]", "timeseries elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel", "or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day == 29), (times.month ==", "elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day ==", "import pandas as pd from . 
import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days", "The time series data to remove leap days from * If something array_like", "= pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29), (times.month ==", "series data to remove leap days from * If something array_like is given,", "np import pandas as pd from . import ResError def remove_leap_day(timeseries): \"\"\"Removes leap", "return timeseries[~sel, :] else: raise ResError('Cannot handle array shape ' + str(timeseries.shape)) elif", "as np import pandas as pd from . import ResError def remove_leap_day(timeseries): \"\"\"Removes", "sel = np.logical_and((times.day == 29), (times.month == 2)) if len(timeseries.shape) == 1: return", "+ str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel =", "import numpy as np import pandas as pd from . import ResError def", "== 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day ==", "(times.month == 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel] else: return timeseries.loc[~sel] else: return", "------- Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return timeseries elif", "array shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times =", "np.logical_and((times.day == 29), (times.month == 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel] else: return", "pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29), (times.month == 2))", "if len(timeseries.shape) == 1: return timeseries[~sel] else: return timeseries[~sel, :] else: raise ResError('Cannot", "data to remove leap days from * If something array_like is given, the", "= timeseries.index sel = 
np.logical_and((times.day == 29), (times.month == 2)) if isinstance(timeseries, pd.Series):", "(times.month == 2)) if len(timeseries.shape) == 1: return timeseries[~sel] else: return timeseries[~sel, :]", "time series data to remove leap days from * If something array_like is", "handle array shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times", "timeseries : array_like The time series data to remove leap days from *", "== 8760: return timeseries elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000", "\"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0] ==", "== 29), (times.month == 2)) if len(timeseries.shape) == 1: return timeseries[~sel] else: return", "' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel", "must be 8784 * If a pandas DataFrame or Series is given, time", "Parameters ---------- timeseries : array_like The time series data to remove leap days", "def remove_leap_day(timeseries): \"\"\"Removes leap days from a given timeseries Parameters ---------- timeseries :", "is given, the length must be 8784 * If a pandas DataFrame or", "a given timeseries Parameters ---------- timeseries : array_like The time series data to", "timeseries[~sel] else: return timeseries[~sel, :] else: raise ResError('Cannot handle array shape ' +", ". 
import ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from a given timeseries Parameters", "length must be 8784 * If a pandas DataFrame or Series is given,", "return timeseries[~sel] else: return timeseries[~sel, :] else: raise ResError('Cannot handle array shape '", "timeseries Parameters ---------- timeseries : array_like The time series data to remove leap", "8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29),", "\"\"\"Removes leap days from a given timeseries Parameters ---------- timeseries : array_like The", "isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0] == 8784: times", "given, the length must be 8784 * If a pandas DataFrame or Series", "if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return timeseries elif timeseries.shape[0] == 8784:", "Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0] == 8760: return timeseries", "else: return timeseries[~sel, :] else: raise ResError('Cannot handle array shape ' + str(timeseries.shape))", "leap days from a given timeseries Parameters ---------- timeseries : array_like The time", "* If a pandas DataFrame or Series is given, time indexes will be", "given timeseries Parameters ---------- timeseries : array_like The time series data to remove", "remove leap days from * If something array_like is given, the length must", "something array_like is given, the length must be 8784 * If a pandas", "ResError def remove_leap_day(timeseries): \"\"\"Removes leap days from a given timeseries Parameters ---------- timeseries", "isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day == 29),", "* If something array_like is given, the length must be 8784 * If", "times = timeseries.index sel = np.logical_and((times.day == 29), (times.month == 
2)) if isinstance(timeseries,", "8784 * If a pandas DataFrame or Series is given, time indexes will", "indexes will be used directly Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray): if", "elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\") sel =", "1: return timeseries[~sel] else: return timeseries[~sel, :] else: raise ResError('Cannot handle array shape", "the length must be 8784 * If a pandas DataFrame or Series is", "\"12-31-2000 23:00:00\", freq=\"H\") sel = np.logical_and((times.day == 29), (times.month == 2)) if len(timeseries.shape)", "str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day", "29), (times.month == 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel] else: return timeseries.loc[~sel] else:", "If something array_like is given, the length must be 8784 * If a", "len(timeseries.shape) == 1: return timeseries[~sel] else: return timeseries[~sel, :] else: raise ResError('Cannot handle", ": array_like The time series data to remove leap days from * If", "be 8784 * If a pandas DataFrame or Series is given, time indexes", "will be used directly Returns ------- Array \"\"\" if isinstance(timeseries, np.ndarray): if timeseries.shape[0]", "pd.Series) or isinstance(timeseries, pd.DataFrame): times = timeseries.index sel = np.logical_and((times.day == 29), (times.month", "timeseries[~sel, :] else: raise ResError('Cannot handle array shape ' + str(timeseries.shape)) elif isinstance(timeseries,", "array_like The time series data to remove leap days from * If something", "raise ResError('Cannot handle array shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries,", "timeseries.index sel = np.logical_and((times.day == 29), (times.month == 2)) if isinstance(timeseries, pd.Series): return", "is given, time indexes will be 
used directly Returns ------- Array \"\"\" if", "return timeseries elif timeseries.shape[0] == 8784: times = pd.date_range(\"01-01-2000 00:00:00\", \"12-31-2000 23:00:00\", freq=\"H\")", "== 29), (times.month == 2)) if isinstance(timeseries, pd.Series): return timeseries[~sel] else: return timeseries.loc[~sel]", "np.logical_and((times.day == 29), (times.month == 2)) if len(timeseries.shape) == 1: return timeseries[~sel] else:", "ResError('Cannot handle array shape ' + str(timeseries.shape)) elif isinstance(timeseries, pd.Series) or isinstance(timeseries, pd.DataFrame):", "to remove leap days from * If something array_like is given, the length" ]
[ "name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ),", "('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField( model_name='registration', name='userType', field=models.CharField(blank=True, max_length=50), ), ]", "migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ],", "migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ]", "verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField( model_name='registration', name='userType', field=models.CharField(blank=True, max_length=50), ),", "# Generated by Django 2.2.3 on 2019-07-27 06:29 from django.db import migrations, models", "serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True,", "[ ('accounts', '0001_initial'), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "by Django 2.2.3 on 2019-07-27 06:29 from django.db import migrations, models class Migration(migrations.Migration):", "models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', 
models.IntegerField()),", "primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField( model_name='registration', name='userType', field=models.CharField(blank=True,", "serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField( model_name='registration', name='userType', field=models.CharField(blank=True, max_length=50),", "migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ],", "'0001_initial'), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "[ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)),", "on 2019-07-27 06:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ),", "('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr',", "class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [ migrations.CreateModel( name='Article',", "models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( 
name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile', fields=[", "2019-07-27 06:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts',", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations =", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile',", "], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone',", "= [ ('accounts', '0001_initial'), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField( model_name='registration', name='userType',", "operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)),", "('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "Migration(migrations.Migration): dependencies = [ ('accounts', 
'0001_initial'), ] operations = [ migrations.CreateModel( name='Article', fields=[", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField( model_name='registration',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)), ], ), migrations.AddField(", "Django 2.2.3 on 2019-07-27 06:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), migrations.CreateModel(", "models class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations = [ migrations.CreateModel(", "2.2.3 on 2019-07-27 06:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'), ] operations", "dependencies = [ ('accounts', '0001_initial'), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id',", "] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title',", "primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc', models.CharField(max_length=1000)), ], ), 
migrations.CreateModel( name='UserProfile', fields=[ ('id',", "= [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1000)), ('desc',", "06:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accounts', '0001_initial'),", "Generated by Django 2.2.3 on 2019-07-27 06:29 from django.db import migrations, models class", "), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_usr', models.IntegerField()), ('phone', models.CharField(max_length=15)),", "('accounts', '0001_initial'), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False," ]
[]
[ "path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ), path( 'memory/', memory.questionnaire_form,", "django.views.generic import TemplateView from djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished import views", "'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail',", "'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form',", "path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits,", "'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination", "# -*- coding: utf-8 -*- from django.urls import path from django.views.generic import TemplateView", "'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path(", "from django.views.generic import TemplateView from djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished import", "), # distinguised alumni nomination path( 
'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form,", "), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/',", "path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives,", "path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories", "path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives,", "path( 'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success',", "name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path(", "classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 
'classnotes/archives/<int:year>/',", "name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ), path( 'memory/', memory.questionnaire_form, name='memory_questionnaire_form', ), ]", "<gh_stars>1-10 # -*- coding: utf-8 -*- from django.urls import path from django.views.generic import", "views as classnotes from djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory import views", "import path from django.views.generic import TemplateView from djforms.alumni.classnotes import views as classnotes from", "as distinguished from djforms.alumni.memory import views as memory urlpatterns = [ path( 'success/',", "distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/',", "), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success',", "from djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory import views as memory urlpatterns", "memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape,", "djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory import views as memory urlpatterns =", "name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path(", "import views as classnotes from djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory import", 
"memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ),", "), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/',", "TemplateView from djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished import views as distinguished", "views as distinguished from djforms.alumni.memory import views as memory urlpatterns = [ path(", "), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives',", "classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ),", "TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success',", "import views as memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes", "path from django.views.generic import TemplateView from djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished", "from djforms.alumni.memory import views as memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ),", "memory.questionnaire_detail, 
name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ), path( 'memory/', memory.questionnaire_form, name='memory_questionnaire_form', ),", "as memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/',", "import views as distinguished from djforms.alumni.memory import views as memory urlpatterns = [", "path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/',", "utf-8 -*- from django.urls import path from django.views.generic import TemplateView from djforms.alumni.classnotes import", "distinguised alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ),", "djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory", "fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path(", "name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'),", "'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, 
name='distinguished_nomination_form', ), # fond memories path(", "-*- coding: utf-8 -*- from django.urls import path from django.views.generic import TemplateView from", "'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives',", "import TemplateView from djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished import views as", "from django.urls import path from django.views.generic import TemplateView from djforms.alumni.classnotes import views as", "name='distinguished_nomination_form', ), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives,", "), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/',", "name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination path( 'distinguished/nomination/success/',", "'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'),", "classnotes from djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory import views as memory", "= [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, 
name='classnotes_carthaginian', ),", "), path( 'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'),", "# classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path(", "TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ),", "TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories path( 'memory/success/',", "# distinguised alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form',", "'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year',", "'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ),", "'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 
'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form',", "memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/',", "distinguished from djforms.alumni.memory import views as memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'),", "path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form,", "TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ),", "views as memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path(", "path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni", "django.urls import path from django.views.generic import TemplateView from djforms.alumni.classnotes import views as classnotes", "# fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ),", "as classnotes from djforms.alumni.distinguished import views as distinguished from djforms.alumni.memory import views as", "name='classnotes_form', ), # distinguised alumni nomination path( 
'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/',", "), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/',", "), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ),", "'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits',", "coding: utf-8 -*- from django.urls import path from django.views.generic import TemplateView from djforms.alumni.classnotes", "urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian',", "djforms.alumni.memory import views as memory urlpatterns = [ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), #", "path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond memories path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ),", "classnotes.screenscrape, name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ),", "), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', 
classnotes.contact, name='classnotes_form', ), # distinguised", "name='classnotes_carthaginian', ), path( 'classnotes/success/', TemplateView.as_view(template_name='alumni/classnotes/done.html'), name='classnotes_success', ), path( 'classnotes/archives/<int:year>/', classnotes.archives, name='classnotes_archives_year', ), path(", "name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ), path(", "classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path(", "name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path(", "classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form', ),", "name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form', ), #", "nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), # fond", "-*- from django.urls import path from django.views.generic import TemplateView from djforms.alumni.classnotes import views", "from djforms.alumni.classnotes import views as classnotes from djforms.alumni.distinguished import views as distinguished from", 
"alumni nomination path( 'distinguished/nomination/success/', TemplateView.as_view(template_name='alumni/data_entered.html'), name='distinguished_nomination_success', ), path( 'distinguished/nomination/', distinguished.nomination_form, name='distinguished_nomination_form', ), #", "classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact, name='classnotes_form', ), # distinguised alumni nomination path(", "), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ), path( 'memory/',", "path( 'classnotes/archives/', classnotes.archives, name='classnotes_archives', ), path( 'classnotes/inmemoriam/', classnotes.obits, name='classnotes_obits', ), path( 'classnotes/', classnotes.contact,", "[ path( 'success/', TemplateView.as_view(template_name='alumni/data_entered.html'), ), # classnotes path( 'classnotes/carthaginian/', classnotes.screenscrape, name='classnotes_carthaginian', ), path(", "'memory/<int:quid>/detail/', memory.questionnaire_detail, name='memory_questionnaire_detail', ), path( 'memory/<str:campaign>/', memory.questionnaire_form, name='memory_questionnaire_promo_form', ), path( 'memory/', memory.questionnaire_form, name='memory_questionnaire_form',", "path( 'memory/success/', TemplateView.as_view(template_name='alumni/memory/done.html'), name='memory_questionnaire_success', ), path( 'memory/archives/', memory.questionnaire_archives, name='memory_questionnaire_archives', ), path( 'memory/<int:quid>/detail/', memory.questionnaire_detail," ]
[ "arguments') print('Usage: create_welsh_messages.py [CSV file name] [output file name]\\n') print('Note: output file name", "1: output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key,", "= open(output_file_name, 'w+') # skip headers for i in range(2): next(messages, None) #", "'===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message)", "len(invalid)) for x in invalid: print('* {}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close()", "output file name is optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2] if", "default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv) == 3 else output_file_name)", "headers for i in range(2): next(messages, None) # write translations try: invalid =", "= [] for message in messages: key = message[0].strip() welsh = message[2].strip() if", "open(output_file_name, 'w+') # skip headers for i in range(2): next(messages, None) # write", "IOError: print(\"Error writing translations\") output_file.close() if __name__ == '__main__': output_file_name = \"messages.cy\" if", "print('Usage: create_welsh_messages.py [CSV file name] [output file name]\\n') print('Note: output file name is", "file name is optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv)", "[] for message in messages: key = message[0].strip() welsh = message[2].strip() if not", "\"messages.cy\" if len(sys.argv) < 2: print('Error: please provide the source CSV file name", "output_file_name): with open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file, 
delimiter=',') output_file = open(output_file_name,", "= \"messages.cy\" if len(sys.argv) < 2: print('Error: please provide the source CSV file", "name]\\n') print('Note: output file name is optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1],", "output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh))", "file name including fullpath in command line arguments') print('Usage: create_welsh_messages.py [CSV file name]", "i in range(2): next(messages, None) # write translations try: invalid = [] for", "in command line arguments') print('Usage: create_welsh_messages.py [CSV file name] [output file name]\\n') print('Note:", "0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for x in", "import csv import sys def write_welsh_translations(csv_file_name, output_file_name): with open(csv_file_name, newline='') as csv_file: messages", "2: print('Error: please provide the source CSV file name including fullpath in command", "csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') # skip headers for i in range(2):", "file name]\\n') print('Note: output file name is optional, default name is \"{}\"'.format(output_file_name)) else:", "+ '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records:", "messages: key = message[0].strip() welsh = message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'):", "if len(key) == 1: output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh)", "if not key: output_file.write('\\n') elif key.startswith('#'): if len(key) == 1: output_file.write(key + 
'===================================================\\n')", "not key: output_file.write('\\n') elif key.startswith('#'): if len(key) == 1: output_file.write(key + '===================================================\\n') else:", "'\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ',", "print(\"Error writing translations\") output_file.close() if __name__ == '__main__': output_file_name = \"messages.cy\" if len(sys.argv)", "import sys def write_welsh_translations(csv_file_name, output_file_name): with open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file,", "name] [output file name]\\n') print('Note: output file name is optional, default name is", "next(messages, None) # write translations try: invalid = [] for message in messages:", "invalid: print('* {}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close() if __name__ == '__main__':", "print('* {}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close() if __name__ == '__main__': output_file_name", "records: ', len(invalid)) for x in invalid: print('* {}'.format(x)) except IOError: print(\"Error writing", "skip headers for i in range(2): next(messages, None) # write translations try: invalid", "writing translations\") output_file.close() if __name__ == '__main__': output_file_name = \"messages.cy\" if len(sys.argv) <", "if __name__ == '__main__': output_file_name = \"messages.cy\" if len(sys.argv) < 2: print('Error: please", "newline='') as csv_file: messages = csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') # skip", "for message in messages: key = message[0].strip() welsh = message[2].strip() if not key:", "len(key) == 1: output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh) >", "def write_welsh_translations(csv_file_name, 
output_file_name): with open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file, delimiter=',') output_file", "output_file.close() if __name__ == '__main__': output_file_name = \"messages.cy\" if len(sys.argv) < 2: print('Error:", "output_file_name = \"messages.cy\" if len(sys.argv) < 2: print('Error: please provide the source CSV", "x in invalid: print('* {}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close() if __name__", "file name] [output file name]\\n') print('Note: output file name is optional, default name", "with open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+')", "key = message[0].strip() welsh = message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'): if", "welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for x in invalid: print('*", "print('Note: output file name is optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2]", "= csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') # skip headers for i in", "[output file name]\\n') print('Note: output file name is optional, default name is \"{}\"'.format(output_file_name))", "== '__main__': output_file_name = \"messages.cy\" if len(sys.argv) < 2: print('Error: please provide the", "elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid))", "delimiter=',') output_file = open(output_file_name, 'w+') # skip headers for i in range(2): next(messages,", "output_file.write('\\n') elif key.startswith('#'): if len(key) == 1: output_file.write(key + '===================================================\\n') else: output_file.write(key +", "provide the source CSV file name including fullpath in command line arguments') 
print('Usage:", "# write translations try: invalid = [] for message in messages: key =", "write_welsh_translations(csv_file_name, output_file_name): with open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file, delimiter=',') output_file =", "create_welsh_messages.py [CSV file name] [output file name]\\n') print('Note: output file name is optional,", "translations\") output_file.close() if __name__ == '__main__': output_file_name = \"messages.cy\" if len(sys.argv) < 2:", "message in messages: key = message[0].strip() welsh = message[2].strip() if not key: output_file.write('\\n')", "as csv_file: messages = csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') # skip headers", "output_file.write(key + '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid", "except IOError: print(\"Error writing translations\") output_file.close() if __name__ == '__main__': output_file_name = \"messages.cy\"", "for x in invalid: print('* {}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close() if", "please provide the source CSV file name including fullpath in command line arguments')", "fullpath in command line arguments') print('Usage: create_welsh_messages.py [CSV file name] [output file name]\\n')", "welsh = message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'): if len(key) == 1:", "source CSV file name including fullpath in command line arguments') print('Usage: create_welsh_messages.py [CSV", "= message[0].strip() welsh = message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'): if len(key)", "message[0].strip() welsh = message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'): if len(key) ==", "__name__ == '__main__': output_file_name = \"messages.cy\" if len(sys.argv) < 2: print('Error: please provide", "CSV file name including fullpath in 
command line arguments') print('Usage: create_welsh_messages.py [CSV file", "+ '===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else:", "in messages: key = message[0].strip() welsh = message[2].strip() if not key: output_file.write('\\n') elif", "else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for x in invalid: print('* {}'.format(x))", "sys def write_welsh_translations(csv_file_name, output_file_name): with open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file, delimiter=',')", "< 2: print('Error: please provide the source CSV file name including fullpath in", "key: output_file.write('\\n') elif key.startswith('#'): if len(key) == 1: output_file.write(key + '===================================================\\n') else: output_file.write(key", "elif key.startswith('#'): if len(key) == 1: output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n')", "invalid = [] for message in messages: key = message[0].strip() welsh = message[2].strip()", "name is optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv) ==", "> 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for x", "key.startswith('#'): if len(key) == 1: output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n') elif", "translations try: invalid = [] for message in messages: key = message[0].strip() welsh", "else: output_file.write(key + '\\n') elif len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished')", "'__main__': output_file_name = \"messages.cy\" if len(sys.argv) < 2: print('Error: please 
provide the source", "command line arguments') print('Usage: create_welsh_messages.py [CSV file name] [output file name]\\n') print('Note: output", "line arguments') print('Usage: create_welsh_messages.py [CSV file name] [output file name]\\n') print('Note: output file", "len(sys.argv) < 2: print('Error: please provide the source CSV file name including fullpath", "# skip headers for i in range(2): next(messages, None) # write translations try:", "open(csv_file_name, newline='') as csv_file: messages = csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') #", "None) # write translations try: invalid = [] for message in messages: key", "name including fullpath in command line arguments') print('Usage: create_welsh_messages.py [CSV file name] [output", "', len(invalid)) for x in invalid: print('* {}'.format(x)) except IOError: print(\"Error writing translations\")", "in range(2): next(messages, None) # write translations try: invalid = [] for message", "len(welsh) > 0: output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for", "in invalid: print('* {}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close() if __name__ ==", "print('Invalid records: ', len(invalid)) for x in invalid: print('* {}'.format(x)) except IOError: print(\"Error", "the source CSV file name including fullpath in command line arguments') print('Usage: create_welsh_messages.py", "== 1: output_file.write(key + '===================================================\\n') else: output_file.write(key + '\\n') elif len(welsh) > 0:", "{}'.format(x)) except IOError: print(\"Error writing translations\") output_file.close() if __name__ == '__main__': output_file_name =", "'w+') # skip headers for i in range(2): next(messages, None) # write translations", "csv_file: messages = csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') # skip headers 
for", "[CSV file name] [output file name]\\n') print('Note: output file name is optional, default", "write translations try: invalid = [] for message in messages: key = message[0].strip()", "optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv) == 3 else", "= message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'): if len(key) == 1: output_file.write(key", "message[2].strip() if not key: output_file.write('\\n') elif key.startswith('#'): if len(key) == 1: output_file.write(key +", "range(2): next(messages, None) # write translations try: invalid = [] for message in", "try: invalid = [] for message in messages: key = message[0].strip() welsh =", "including fullpath in command line arguments') print('Usage: create_welsh_messages.py [CSV file name] [output file", "output_file = open(output_file_name, 'w+') # skip headers for i in range(2): next(messages, None)", "invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for x in invalid: print('* {}'.format(x)) except", "print('Finished') print('Invalid records: ', len(invalid)) for x in invalid: print('* {}'.format(x)) except IOError:", "csv import sys def write_welsh_translations(csv_file_name, output_file_name): with open(csv_file_name, newline='') as csv_file: messages =", "is optional, default name is \"{}\"'.format(output_file_name)) else: write_welsh_translations(sys.argv[1], sys.argv[2] if len(sys.argv) == 3", "for i in range(2): next(messages, None) # write translations try: invalid = []", "if len(sys.argv) < 2: print('Error: please provide the source CSV file name including", "messages = csv.reader(csv_file, delimiter=',') output_file = open(output_file_name, 'w+') # skip headers for i", "output_file.write('{}={}\\n'.format(key, welsh)) else: invalid.append(message) print('Finished') print('Invalid records: ', len(invalid)) for x in invalid:", "print('Error: please provide the 
source CSV file name including fullpath in command line" ]
[ "psycopg2 from ..config import AppConfig from ..designpatterns import singleton config = AppConfig() @singleton", "..config import AppConfig from ..designpatterns import singleton config = AppConfig() @singleton class MyApplicationDatabase(object):", "PostgreSQL database, but can be modified for any other. ''' import psycopg2 from", "#for key in self.config.options(\"\"): # db_info[key] = config. return self.pool ### etc. ###", "release): ''' Return the pool of database connections for the database connected. '''", "None def pool(self, release): ''' Return the pool of database connections for the", "in self.config.options(\"\"): # db_info[key] = config. return self.pool ### etc. ### ## TODO:", "deleted if not needed. The example given is for a PostgreSQL database, but", "data source name if self.pool is None: db_info = {} #for key in", "# ----------------------------------- # Database connection setup & methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html", "for the database connected. ''' # ----------------------------------- # Database connection setup & methods", "simply be deleted if not needed. The example given is for a PostgreSQL", "MyApplicationDatabase(object): def __init__(self): self.pool = None def pool(self, release): ''' Return the pool", "methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data", "..designpatterns import singleton config = AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool =", "connections for the database connected. ''' # ----------------------------------- # Database connection setup &", "class MyApplicationDatabase(object): def __init__(self): self.pool = None def pool(self, release): ''' Return the", "for a PostgreSQL database, but can be modified for any other. 
''' import", "#!/usr/bin/python ''' This file handles a database connection. It can simply be deleted", "database connection. It can simply be deleted if not needed. The example given", "Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name if self.pool is None: db_info", "singleton config = AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool = None def", "if not needed. The example given is for a PostgreSQL database, but can", "''' # ----------------------------------- # Database connection setup & methods # ----------------------------------- # Ref:", "# db_info[key] = config. return self.pool ### etc. ### ## TODO: create a", "pool of database connections for the database connected. ''' # ----------------------------------- # Database", "key in self.config.options(\"\"): # db_info[key] = config. return self.pool ### etc. ### ##", "the pool of database connections for the database connected. ''' # ----------------------------------- #", "### ## TODO: create a sample db file for PostgreSQL, SQLite, and SQLAlchemy", "= AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool = None def pool(self, release):", "self.config.options(\"\"): # db_info[key] = config. return self.pool ### etc. ### ## TODO: create", "AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool = None def pool(self, release): '''", "pool(self, release): ''' Return the pool of database connections for the database connected.", "''' import psycopg2 from ..config import AppConfig from ..designpatterns import singleton config =", "<reponame>demitri/DtU_api #!/usr/bin/python ''' This file handles a database connection. It can simply be", "can simply be deleted if not needed. The example given is for a", "db_info = {} #for key in self.config.options(\"\"): # db_info[key] = config. 
return self.pool", "----------------------------------- # Database connection setup & methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html #", "a database connection. It can simply be deleted if not needed. The example", "import psycopg2 from ..config import AppConfig from ..designpatterns import singleton config = AppConfig()", "# Database connection setup & methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref:", "db_info[key] = config. return self.pool ### etc. ### ## TODO: create a sample", "# ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source", "from ..designpatterns import singleton config = AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool", "# Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name if", "def __init__(self): self.pool = None def pool(self, release): ''' Return the pool of", "handles a database connection. It can simply be deleted if not needed. The", "config = AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool = None def pool(self,", "# dsn = data source name if self.pool is None: db_info = {}", "This file handles a database connection. It can simply be deleted if not", "file handles a database connection. It can simply be deleted if not needed.", "self.pool is None: db_info = {} #for key in self.config.options(\"\"): # db_info[key] =", "database connections for the database connected. ''' # ----------------------------------- # Database connection setup", "a PostgreSQL database, but can be modified for any other. ''' import psycopg2", "database, but can be modified for any other. 
''' import psycopg2 from ..config", "of database connections for the database connected. ''' # ----------------------------------- # Database connection", "the database connected. ''' # ----------------------------------- # Database connection setup & methods #", "be deleted if not needed. The example given is for a PostgreSQL database,", "can be modified for any other. ''' import psycopg2 from ..config import AppConfig", "but can be modified for any other. ''' import psycopg2 from ..config import", "__init__(self): self.pool = None def pool(self, release): ''' Return the pool of database", "other. ''' import psycopg2 from ..config import AppConfig from ..designpatterns import singleton config", "None: db_info = {} #for key in self.config.options(\"\"): # db_info[key] = config. return", "any other. ''' import psycopg2 from ..config import AppConfig from ..designpatterns import singleton", "return self.pool ### etc. ### ## TODO: create a sample db file for", "given is for a PostgreSQL database, but can be modified for any other.", "Return the pool of database connections for the database connected. ''' # -----------------------------------", "connection. It can simply be deleted if not needed. The example given is", "Database connection setup & methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool", "def pool(self, release): ''' Return the pool of database connections for the database", "= config. return self.pool ### etc. 
### ## TODO: create a sample db", "import singleton config = AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self): self.pool = None", "Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name if self.pool", "connection setup & methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool #", "config. return self.pool ### etc. ### ## TODO: create a sample db file", "### etc. ### ## TODO: create a sample db file for PostgreSQL, SQLite,", "for any other. ''' import psycopg2 from ..config import AppConfig from ..designpatterns import", "database connected. ''' # ----------------------------------- # Database connection setup & methods # -----------------------------------", "etc. ### ## TODO: create a sample db file for PostgreSQL, SQLite, and", "is None: db_info = {} #for key in self.config.options(\"\"): # db_info[key] = config.", "''' This file handles a database connection. It can simply be deleted if", "The example given is for a PostgreSQL database, but can be modified for", "http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name if self.pool is None: db_info =", "& methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn =", "''' Return the pool of database connections for the database connected. ''' #", "# Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name if self.pool is None:", "name if self.pool is None: db_info = {} #for key in self.config.options(\"\"): #", "{} #for key in self.config.options(\"\"): # db_info[key] = config. return self.pool ### etc.", "is for a PostgreSQL database, but can be modified for any other. 
'''", "dsn = data source name if self.pool is None: db_info = {} #for", "from ..config import AppConfig from ..designpatterns import singleton config = AppConfig() @singleton class", "not needed. The example given is for a PostgreSQL database, but can be", "AppConfig from ..designpatterns import singleton config = AppConfig() @singleton class MyApplicationDatabase(object): def __init__(self):", "= data source name if self.pool is None: db_info = {} #for key", "It can simply be deleted if not needed. The example given is for", "http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name if self.pool is", "----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn = data source name", "= None def pool(self, release): ''' Return the pool of database connections for", "self.pool = None def pool(self, release): ''' Return the pool of database connections", "= {} #for key in self.config.options(\"\"): # db_info[key] = config. return self.pool ###", "needed. The example given is for a PostgreSQL database, but can be modified", "source name if self.pool is None: db_info = {} #for key in self.config.options(\"\"):", "modified for any other. ''' import psycopg2 from ..config import AppConfig from ..designpatterns", "setup & methods # ----------------------------------- # Ref: http://initd.org/psycopg/docs/module.html # Ref: http://packages.python.org/psycopg2/pool.html#module-psycopg2.pool # dsn", "be modified for any other. ''' import psycopg2 from ..config import AppConfig from", "connected. ''' # ----------------------------------- # Database connection setup & methods # ----------------------------------- #", "self.pool ### etc. 
### ## TODO: create a sample db file for PostgreSQL,", "@singleton class MyApplicationDatabase(object): def __init__(self): self.pool = None def pool(self, release): ''' Return", "import AppConfig from ..designpatterns import singleton config = AppConfig() @singleton class MyApplicationDatabase(object): def", "if self.pool is None: db_info = {} #for key in self.config.options(\"\"): # db_info[key]", "example given is for a PostgreSQL database, but can be modified for any" ]
[ "def setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b", "FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "self.rho * self.wG) self.wL = s * (1 - (1 - self.rho) *", "self.mu val = ((1.0 + (x / self.gamma) ** 2) ** (-1.5)) /", "1j * self.gamma) / (self.sigma * sqrt2) top = wofz(z).real / (self.sigma *", "- self.rho) * self.wL) self.wI = s * self.wI self.wH = s *", "+ (1.0 - self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable", "of the profile, defaults to 1. Returns ------- Lorentzian Callable instance, evaluates the", "0. amp: float Amplitude of the profile, defaults to 1. Returns ------- Lorentzian", "wofz from scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational',", "* self.wG) self.wL = s * (1 - (1 - self.rho) * self.wL)", "value): self._fwhm = value self.sigma = self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor", "self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ---------- fwhm: float Full", "isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate G, L", "-1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d", "\\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None,", "amp: float Amplitude of the profile, defaults to 1. Returns ------- Lorentzian Callable", "mu if mu is not None else 0.0 self.amp = amp if amp", "/ self.sigma) ** 2) normPart = self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart /", "Width At Half Maximum of the components, defaults to 1. 
Ordered as Gaussian,", "else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.setParams() def", "-73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688,", "-23.45651, 10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021,", "def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "(2 * self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile.", "self.fwhmL ** 2 + self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma = seperate", "if self._n > 1: self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "(2 * np.pi) ** 0.5 sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2)) base_e", "= s * self.wI self.wH = s * self.wH self.nL = self.rho *", "s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "* (1 - (1 - self.rho) * self.wL) self.wI = s * self.wI", "1. Attributes ---------- totalfwhm: float Approximation of the width based on the underlying", "self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt", "'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi = (2 *", "then Lorentzian. mu: float Location of the center, defaults to 0. amp: float", "1.0 / (self.gamma * np.pi) def __call__(self, x): x = x - self.mu", "@property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma", "4.47163 * (self.fwhmG ** 2) * (self.fwhmL ** 3) + 0.07842 * self.fwhmG", "<<EMAIL>> .. 
moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as np from scipy.special import", "if not self.ampIsArea: self._normFactor = 1.0 / (self.gamma * np.pi) def __call__(self, x):", "def __call__(self, x): x = x - self.mu topPart = self.gamma bottomPart =", "* np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor = 1.0 / (2 *", "super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational profile. Parameters ---------- fwhm: float", "value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1) if not", ".. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None,", "G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None,", "2.42843 * (self.fwhmG ** 3) * (self.fwhmL ** 2) + 4.47163 * (self.fwhmG", "self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) def __call__(self, x): x =", "(1 - self.rho) * self.wL) self.wI = s * self.wI self.wH = s", "the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately transformed for the Gaussian", "[3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822,", "defaults to 1. Ordered as Gaussian, then Lorentzian. mu: float Location of the", "(2 * self.gamma) def __call__(self, x): x = x - self.mu val =", "the arguments supplied. Note ---- The used formula is taken from T. 
Ida", "value self.G.fwhm = value if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor", "2.69269 * (self.fwhmG ** 4) * self.fwhmL + 2.42843 * (self.fwhmG ** 3)", "super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "Returns ------- ExtendedVoigt Callable instance, evaluates the extended Voigt profile in the arguments", "-24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717,", "r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ---------- fwhm: float Full Width At Half", "\\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta)", "1.0 else: factor = self._normFactor vals = vals / factor return self.amp *", "def __call__(self, x): x = x - self.mu expPart = np.exp(-0.5 * (x", "class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ---------- fwhm: float Full Width", "__call__(self, x): x = x - self.mu Gauss = (1 - self.nL -", "= top def __call__(self, x): x = x - self.mu z = (x", "n(self, value): value = np.abs(value) if value > 1: value = value -", "1: self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", "+ 2.69269 * (self.fwhmG ** 4) * self.fwhmL + 2.42843 * (self.fwhmG **", "self.gamma) def __call__(self, x): x = x - self.mu coshPart = (1.0 /", "self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho)", "self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H", "mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", ".. 
math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu)", "webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def", "+ \\ (0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) ** 0.5", "np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f = np.array( [3.7029, -21.18862,", "* np.pi) def __call__(self, x): x = x - self.mu topPart = self.gamma", "**kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "fwhm: list of 2 floats Full Width At Half Maximum, defaults to 1,", ":cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu,", "np.pi) ** 0.5 sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2)) base_e = np.exp(1)", "self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value):", "* (1 - self.rho) * self.nI self.nH = self.rho * (1 - self.rho)", "= Gaussian(**kwargs) self._n = np.abs(eta) if eta is not None else 0.5 if", "moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as np from scipy.special import wofz from", "@fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = 0.5 * self.fwhm if", "amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if fwhm is", "self.nH * self.H(0) val = Gauss + Lorentz + Irrat + Hyper self._normFactor", "the underlying widths. 
Returns ------- Voigt Callable instance, evaluates the Voigt profile in", "Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5 + 2.69269", "{\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu,", "* (1 - self.rho * self.wG) self.wL = s * (1 - (1", "/ (self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt,", "np from scipy.special import wofz from scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian',", "* np.sqrt(2 * np.log(2)) base_e = np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None,", "- self.nH) * self.G(0) Lorentz = self.nL * self.L(0) Irrat = self.nI *", "math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None,", "= np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f = np.array( [3.7029,", "isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm =", "* self.H(x) val = Gauss + Lorentz + Irrat + Hyper return super(ExtendedVoigt,", "sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2)) base_e = np.exp(1) class Profile(object): def", "** 2) ** (-1.5)) / (2 * self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile):", "inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &=", "top = wofz(z).real / (self.sigma * sqrt2pi) self._normFactor = top def __call__(self, x):", "self.n) * self.G(0) def __call__(self, x): x = x - self.mu val =", "amp: float Amplitude of the profile, defaults to 1. 
Returns ------- PseudoVoigt Callable", "- self.nI - self.nH) * self.G(0) Lorentz = self.nL * self.L(0) Irrat =", "1. Attributes ---------- totalfwhm: float Approximation of the total width, based on the", "H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None,", "* self.wL) self.wI = s * self.wI self.wH = s * self.wH self.nL", "---- The used formula is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math::", "self._n = value if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor +=", "of the total width, based on the underlying widths. Returns ------- ExtendedVoigt Callable", "self.amp * vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. Parameters ---------- fwhm:", "instance, evaluates the Voigt profile in the arguments supplied. Note ---- The formula", "self._fwhmNorm if not self.ampIsArea: z = (0 + 1j * self.gamma) / (self.sigma", "top = wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A", "= Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5 +", "Voigt Callable instance, evaluates the Voigt profile in the arguments supplied. 
Note ----", "self._normFactor = self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) def", "return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm /", "-21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g = np.array( [9.76947, -24.12407, 22.10544, -11.09215,", "self.ampIsArea: factor = 1.0 else: factor = self._normFactor vals = vals / factor", "2 + self.gamma ** 2) * np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class", "&= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian,", "self.amp) return s def __call__(self, vals): if self.ampIsArea: factor = 1.0 else: factor", "-11.09215, 3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429,", "seperate self._fwhm = 0.5346 * self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL ** 2", "= self.nH * self.H(x) val = Gauss + Lorentz + Irrat + Hyper", "FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "self.fwhmV = (self.fwhmG ** 5 + 2.69269 * (self.fwhmG ** 4) * self.fwhmL", "of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\"", "+ \\ np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) else: self.fwhmG,", "* self.G(x) Lorentz = self.nL * self.L(x) Irrat = self.nI * self.I(x) Hyper", "as Gaussian and Lorentzian width. 
mu: float Location of the center, defaults to", "interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2", "g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):", "arguments supplied. Note ---- The formula used is taken from the Wikipedia webpage", "x - self.mu expPart = np.exp(-0.5 * (x / self.sigma) ** 2) normPart", "= value self.gamma = 0.5 * self.fwhm if not self.ampIsArea: self._normFactor = 1.0", "used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately", "(1.0 / 5) if not self.ampIsArea: Gauss = (1 - self.nL - self.nI", "\\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm", "totalfwhm: float Approximation of the total width, based on the underlying widths. Returns", "9.3155, -1.38927, -0.30165]) g = np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437])", ":platform: Windows :synopsis: Implementation of classes for different lineshapes, creating callables for easy", "not self.ampIsArea: self._normFactor = 1.0 / (self.gamma * np.pi) def __call__(self, x): x", "Implementation of classes for different lineshapes, creating callables for easy and intuitive calculations.", "2) ** (-1.5)) / (2 * self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A", "= value, value self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma = self._fwhm /", "of the profile, defaults to 1. Returns ------- Hyperbolic Callable instance, evaluates the", "pseudovoigt profile in the arguments supplied. 
Note ---- The formula used is taken", "Lorentz = self.nL * self.L(x) Irrat = self.nI * self.I(x) Hyper = self.nH", "amp: {}' s = s.format(self.fwhm, self.mu, self.amp) return s def __call__(self, vals): if", "expPart = np.exp(-0.5 * (x / self.sigma) ** 2) normPart = self.sigma *", "self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable normalized", "ExtendedVoigt Callable instance, evaluates the extended Voigt profile in the arguments supplied. Note", "the total width, based on the underlying widths. Returns ------- ExtendedVoigt Callable instance,", "fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL =", "np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) else: self.fwhmG, self.fwhmL =", "Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm", "is taken from T. Ida et al. :cite:`Ida2000`, code inspired by the PhD", "= 2 * np.sqrt(2 * np.log(2)) base_e = np.exp(1) class Profile(object): def __init__(self,", "by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right)", "self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH", "super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if fwhm is not None", "inspired by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. .. 
math:: \\mathcal{I}\\left(x; \\mu,", "self.nI - self.nH) * self.G(0) Lorentz = self.nL * self.L(0) Irrat = self.nI", "= self.gamma bottomPart = (x ** 2 + self.gamma ** 2) * np.pi", "- self.n) * self.G(0) @property def n(self): return self._n @n.setter def n(self, value):", "self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "self.amp = amp if amp is not None else 1.0 def __repr__(self): s", "else 1.0 def __repr__(self): s = str(type(self)) + 'FWHM: {}, mu: {}, amp:", "return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters", "profile in the arguments supplied. Note ---- The formula used is taken from", "Callable instance, evaluates the pseudovoigt profile in the arguments supplied. Note ---- The", "in the arguments supplied. Note ---- The formula used is taken from the", "self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG", "= ((1.0 + (x / self.gamma) ** 2) ** (-1.5)) / (2 *", "- 1) if not self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) / (2 *", "to 1. Returns ------- Hyperbolic Callable instance, evaluates the hyperbolic profile in the", "instance, evaluates the hyperbolic profile in the arguments supplied. Note ---- The used", "**self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5 + 2.69269 *", "return s def __call__(self, vals): if self.ampIsArea: factor = 1.0 else: factor =", "* value self.setParams() def setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984,", "r\"\"\"A callable normalized Irrational profile. 
Parameters ---------- fwhm: float Full Width At Half", "@property def n(self): return self._n @n.setter def n(self, value): value = np.abs(value) if", "= 0.5346 * self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG", "fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta)", "if not self.ampIsArea: self._normFactor = 1.0 / (2 * self.gamma) def __call__(self, x):", "&= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "__call__(self, x): x = x - self.mu z = (x + 1j *", "&= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared,", "MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &=", "self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile. Parameters", "1.0 def __repr__(self): s = str(type(self)) + 'FWHM: {}, mu: {}, amp: {}'", "vals = vals / factor return self.amp * vals class Gaussian(Profile): r\"\"\"A callable", "(self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational profile.", "\\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma,", "components, defaults to 1. Ordered as Gaussian, then Lorentzian. mu: float Location of", "defaults to 1. 
Returns ------- Lorentzian Callable instance, evaluates the Lorentzian profile in", "top def __call__(self, x): x = x - self.mu z = (x +", "not None else 1.0 def __repr__(self): s = str(type(self)) + 'FWHM: {}, mu:", "3) * (self.fwhmL ** 2) + 4.47163 * (self.fwhmG ** 2) * (self.fwhmL", "__init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm)", "22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815,", "arguments supplied. Note ---- The formula used is taken from the MathWorld webpage", "r\"\"\"A callable normalized Voigt profile. Parameters ---------- fwhm: list of 2 floats Full", "self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters ---------- fwhm:", "normPart = self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A", "x - self.mu Gauss = (1 - self.nL - self.nI - self.nH) *", "= value[0:2] self.fwhmG, self.fwhmL = seperate G, L = seperate self._fwhm = 0.5346", "<<EMAIL>> \"\"\" import numpy as np from scipy.special import wofz from scipy.interpolate import", "= (2 * np.pi) ** 0.5 sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))", "= np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614,", "arguments supplied. Note ---- The formula used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation,", "for the Gaussian and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right)", "self.ampIsArea: self._normFactor = 1.0 / (2 * self.gamma) def __call__(self, x): x =", "Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to 1. mu:", "total width, based on the underlying widths. 
Returns ------- ExtendedVoigt Callable instance, evaluates", "and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" import", "self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.sigma = self.fwhm / (sqrt2log2t2)", "g = np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h = np.array(", "\\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):", "class Irrational(Profile): r\"\"\"A callable normalized Irrational profile. Parameters ---------- fwhm: float Full Width", "self.mu z = (x + 1j * self.gamma) / (self.sigma * sqrt2) top", "np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h,", "c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d = np.array(", "self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) / (2 * self.gamma) def __call__(self, x):", "evaluates the irrational profile in the arguments supplied. Note ---- The used formula", "Callable instance, evaluates the Lorentzian profile in the arguments supplied. Note ---- The", "- self.nH) * self.G(x) Lorentz = self.nL * self.L(x) Irrat = self.nI *", "**kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value", "Parameters ---------- fwhm: list of 2 floats Full Width At Half Maximum of", "5) if not self.ampIsArea: Gauss = (1 - self.nL - self.nI - self.nH)", "on the underlying widths. Returns ------- ExtendedVoigt Callable instance, evaluates the extended Voigt", "self.nH) * self.G(x) Lorentz = self.nL * self.L(x) Irrat = self.nI * self.I(x)", "1: value = value - int(value) self._n = value if not self.ampIsArea: self._normFactor", "self.fwhmL = seperate G, L = seperate self._fwhm = 0.5346 * self.fwhmL +", "Irrational profile. 
Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to", "self.rho) self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho)", "self._n = np.abs(eta) if eta is not None else 0.5 if self._n >", "mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm", "in the arguments supplied. Note ---- The used formula is taken from the", "if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm", "np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g = np.array( [9.76947, -24.12407,", "self.mu Gauss = (1 - self.nL - self.nI - self.nH) * self.G(x) Lorentz", "** 0.5 sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2)) base_e = np.exp(1) class", "coshPart = (1.0 / np.cosh(x / self.gamma)) ** 2 simplePart = 2 *", "1.0 / (2 * self.gamma) def __call__(self, x): x = x - self.mu", "= 0.6144031129489123 * value self.setParams() def setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291,", "2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "= ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5", "Full Width At Half Maximum, defaults to 1. mu: float Location of the", "The used formula is taken from T. Ida et al. :cite:`Ida2000`, code inspired", "** 2 + self.fwhmG ** 2) else: self.fwhmG, self.fwhmL = value, value self._fwhm", "At Half Maximum of the components, defaults to 1. 
Ordered as Gaussian, then", "self.rho) * self.wL) self.wI = s * self.wI self.wH = s * self.wH", "self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) ** (-1)", "= self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) def __call__(self,", "2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1) if not self.ampIsArea:", ".. moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as np from scipy.special import wofz", "lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma,", "2 + self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma = seperate / self._fwhmNorm", "vals / factor return self.amp * vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian", "= mu if mu is not None else 0.0 self.amp = amp if", "math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu) +", "Amplitude of the profile, defaults to 1. Attributes ---------- totalfwhm: float Approximation of", "self.fwhmL / (self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho)", "Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH,", "Amplitude of the profile, defaults to 1. Returns ------- PseudoVoigt Callable instance, evaluates", "/ (2 * self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared", "Lorentzian Callable instance, evaluates the Lorentzian profile in the arguments supplied. Note ----", "in the arguments supplied. 
Note ---- The used formula is taken from T.", "[-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL +", "self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if fwhm is not None else np.abs(1.0)", "** 2 + self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma = seperate /", "np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f,", "profile. Parameters ---------- fwhm: list of 2 floats Full Width At Half Maximum,", "** 2 simplePart = 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class", "from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately transformed for the", ":cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. This class uses", "the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and the values supplied", "2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized", "= self.fwhm / (2 * np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor =", "* self.nL) self.nI = self.rho * (1 - self.rho) * self.nI self.nH =", "is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu}", "Gauss = (1 - self.nL - self.nI - self.nH) * self.G(x) Lorentz =", "-15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186])", "1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho)", "1. Returns ------- PseudoVoigt Callable instance, evaluates the pseudovoigt profile in the arguments", "intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. 
moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy", "return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.L.fwhm = value self.G.fwhm", "profile, defaults to 1. Returns ------- Gaussian Callable instance, evaluates the Gaussian profile", "__call__(self, x): x = x - self.mu val = ((1.0 + (x /", "= value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1) if", "\\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs):", "self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if fwhm is not None else", "def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu,", "instance, evaluates the Lorentzian profile in the arguments supplied. Note ---- The formula", "if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate G,", "def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", "math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None,", "2) else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.setParams()", "factor return self.amp * vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. Parameters", "x = x - self.mu Gauss = (1 - self.nL - self.nI -", "FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile. 
Parameters ---------- fwhm: list of 2", "= self.fwhmL / (self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b,", "self.mu, self.amp) return s def __call__(self, vals): if self.ampIsArea: factor = 1.0 else:", "* self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2)", "the arguments supplied. Note ---- The formula used is taken from the MathWorld", "/ 3) - 1) if not self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) /", "def fwhm(self, value): self._fwhm = value self.L.fwhm = value self.G.fwhm = value if", "setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b =", "is not None else 1.0 def __repr__(self): s = str(type(self)) + 'FWHM: {},", "5 ) ** (1.0 / 5) if not self.ampIsArea: Gauss = (1 -", "= Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta) if eta is not None", "self.nL * self.L(x) Irrat = self.nI * self.I(x) Hyper = self.nH * self.H(x)", "x): x = x - self.mu expPart = np.exp(-0.5 * (x / self.sigma)", "based on the underlying widths. Returns ------- ExtendedVoigt Callable instance, evaluates the extended", "r\"\"\"A callable normalized PseudoVoigt profile. Parameters ---------- fwhm: float Full Width At Half", "the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):", "on the underlying widths. Returns ------- Voigt Callable instance, evaluates the Voigt profile", "self.fwhmG, self.fwhmL = seperate G, L = seperate self._fwhm = 0.5346 * self.fwhmL", "defaults to 1. 
Attributes ---------- totalfwhm: float Approximation of the width based on", "Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs", "\\ np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) else: self.fwhmG, self.fwhmL", "\\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):", "= value self.L.fwhm = value self.G.fwhm = value if not self.ampIsArea: self._normFactor =", "formula used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is", "The formula used is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the", "@fwhm.setter def fwhm(self, value): self._fwhm = value self.sigma = self.fwhm / (sqrt2log2t2) if", "class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. 
Parameters ---------- fwhm: float Full Width", "@fwhm.setter def fwhm(self, value): self._fwhm = value self.L.fwhm = value self.G.fwhm = value", "f = np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g = np.array(", "2) ** 0.5 self.sigma, self.gamma = seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL =", "is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately transformed", "self.fwhmL ** 5 ) ** (1.0 / 5) if not self.ampIsArea: Gauss =", "2) + 4.47163 * (self.fwhmG ** 2) * (self.fwhmL ** 3) + 0.07842", "1)) if not self.ampIsArea: self._normFactor = 1.0 / (2 * self.gamma) def __call__(self,", "np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL", "fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) +", "profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to 1.", "amp: float Amplitude of the profile, defaults to 1. Attributes ---------- totalfwhm: float", "* self.L(0) Irrat = self.nI * self.I(0) Hyper = self.nH * self.H(0) val", "1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f =", "different lineshapes, creating callables for easy and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>>", "* np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt", "- self.mu val = ((1.0 + (x / self.gamma) ** 2) ** (-1.5))", "self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt profile. Parameters ---------- fwhm:", "Gaussian, then Lorentzian. mu: float Location of the center, defaults to 0. 
amp:", "= 1.0 / (2 * self.gamma) def __call__(self, x): x = x -", "self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI", "= seperate G, L = seperate self._fwhm = 0.5346 * self.fwhmL + \\", "* self.wI self.wH = s * self.wH self.nL = self.rho * (1 +", "= s * (1 - self.rho * self.wG) self.wL = s * (1", "profiles :platform: Windows :synopsis: Implementation of classes for different lineshapes, creating callables for", "callable normalized Lorentzian profile. Parameters ---------- fwhm: float Full Width At Half Maximum,", "self.ampIsArea: z = (0 + 1j * self.gamma) / (self.sigma * sqrt2) top", "scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2", "to 1, ordered as Gaussian and Lorentzian width. mu: float Location of the", "appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: .. math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right)", "-0.30165]) g = np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h =", "------- Lorentzian Callable instance, evaluates the Lorentzian profile in the arguments supplied. Note", "import numpy as np from scipy.special import wofz from scipy.interpolate import interp1d __all__", "0.5346 * self.fwhmL + \\ (0.2166 * self.fwhmL ** 2 + self.fwhmG **", "def __call__(self, x): x = x - self.mu Gauss = (1 - self.nL", "__init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "value = np.abs(value) if value > 1: value = value - int(value) self._n", "value self.sigma = self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma *", "width based on the underlying widths. 
Returns ------- Voigt Callable instance, evaluates the", "class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea", "2 floats Full Width At Half Maximum of the components, defaults to 1.", "amp: float Amplitude of the profile, defaults to 1. Returns ------- Hyperbolic Callable", "factor = self._normFactor vals = vals / factor return self.amp * vals class", "et al. :cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. This", "easy and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\"", "Callable instance, evaluates the Voigt profile in the arguments supplied. Note ---- The", "for easy and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>>", "function, and the values supplied as FWHM are appropriately transformed to :math:`\\sigma` and", "amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", "* self.H(0) val = Gauss + Lorentz + Irrat + Hyper self._normFactor =", "value self.setParams() def setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021,", "- self.mu topPart = self.gamma bottomPart = (x ** 2 + self.gamma **", "------- Gaussian Callable instance, evaluates the Gaussian profile in the arguments supplied. Note", "fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "x): x = x - self.mu Gauss = (1 - self.nL - self.nI", "supplied. Note ---- Formula taken from <NAME> et al. 
:cite:`Ida2000`, code inspired by", "val = self.n * self.L(x) + (1.0 - self.n) * self.G(x) return super(PseudoVoigt,", "**kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "arguments supplied. Note ---- The used formula is taken from the MathWorld webpage", "amp: float Amplitude of the profile, defaults to 1. Returns ------- Irrational Callable", "-39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL + self.fwhmG) self.wG", "self.wL) self.wI = s * self.wI self.wH = s * self.wH self.nL =", "seperate = value[0:2] self.fwhmG, self.fwhmL = seperate G, L = seperate self._fwhm =", "* self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) @property def n(self): return", "np.exp(-0.5 * (x / self.sigma) ** 2) normPart = self.sigma * sqrt2pi return", "as np from scipy.special import wofz from scipy.interpolate import interp1d __all__ = ['Gaussian',", "formula used is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu,", "(self.fwhmG ** 5 + 2.69269 * (self.fwhmG ** 4) * self.fwhmL + 2.42843", "= self.nH * self.H(0) val = Gauss + Lorentz + Irrat + Hyper", "return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational profile. 
Parameters ---------- fwhm:", "= amp if amp is not None else 1.0 def __repr__(self): s =", "amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", "2.76622, -0.68688, -0.47745, 1.10186]) f = np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927,", "= np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI =", "a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b = np.array(", "3) + 0.07842 * self.fwhmG * (self.fwhmL ** 4) + self.fwhmL ** 5", "(1 - self.rho) * self.nI self.nH = self.rho * (1 - self.rho) *", "** (1.0 / 5) if not self.ampIsArea: Gauss = (1 - self.nL -", "- self.rho) * self.nL) self.nI = self.rho * (1 - self.rho) * self.nI", "not None else 0.5 if self._n > 1: self._n = self._n - int(self._n)", ") ** (1.0 / 5) if not self.ampIsArea: Gauss = (1 - self.nL", "2) * (self.fwhmL ** 3) + 0.07842 * self.fwhmG * (self.fwhmL ** 4)", "self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5", "self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) else:", "Irrational Callable instance, evaluates the irrational profile in the arguments supplied. Note ----", "np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023,", "= self.nL * self.L(0) Irrat = self.nI * self.I(0) Hyper = self.nH *", "amp if amp is not None else 1.0 def __repr__(self): s = str(type(self))", "= np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b = np.array( [3.19974,", "* self.fwhmL + 2.42843 * (self.fwhmG ** 3) * (self.fwhmL ** 2) +", "(list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate G, L =", "to 1. 
Attributes ---------- totalfwhm: float Approximation of the total width, based on", "is appropriately transformed for the Gaussian and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu,", "self.rho) self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho)", "= value self.sigma = self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma", "'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi = (2 * np.pi)", "Approximation of the total width, based on the underlying widths. Returns ------- ExtendedVoigt", "self._fwhm = value self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) + 1)) if", "np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate G, L = seperate self._fwhm", "**self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs)", "\"\"\" import numpy as np from scipy.special import wofz from scipy.interpolate import interp1d", "self.gamma = self._fwhm / self._fwhmNorm if not self.ampIsArea: z = (0 + 1j", "self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma = seperate / self._fwhmNorm else: self.fwhmG,", "self.sigma) ** 2) normPart = self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart / normPart)", "et al. :cite:`Ida2000`, code inspired by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`.", "Gaussian(**kwargs) self._n = np.abs(eta) if eta is not None else 0.5 if self._n", "base_e = np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__()", "self.sigma = self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi)", "as FWHM are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: .. 
math:: V\\left(x;\\mu, \\sigma,", "int(value) self._n = value if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor", "= (x + 1j * self.gamma) / (self.sigma * sqrt2) top = wofz(z).real", "- (1 - self.rho) * self.wL) self.wI = s * self.wI self.wH =", "[-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f = np.array( [3.7029, -21.18862, 34.96491,", "Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\"", "value): self._fwhm = value self.gamma = 0.5 * self.fwhm if not self.ampIsArea: self._normFactor", "defaults to 1, ordered as Gaussian and Lorentzian width. mu: float Location of", "PhD thesis of <NAME> :cite:`Yordanov2007`. This class uses a weighted sum of the", "+ self.fwhmG ** 2) else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123", "1, ordered as Gaussian and Lorentzian width. mu: float Location of the center,", "self._fwhm = 0.5346 * self.fwhmL + \\ (0.2166 * self.fwhmL ** 2 +", "scipy.special import wofz from scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt',", "* (1 + (1 - self.rho) * self.nL) self.nI = self.rho * (1", "\\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm,", "------- ExtendedVoigt Callable instance, evaluates the extended Voigt profile in the arguments supplied.", "None else np.abs(1.0) self.mu = mu if mu is not None else 0.0", "value self.gamma = 0.5 * self.fwhm if not self.ampIsArea: self._normFactor = 1.0 /", "to 0. amp: float Amplitude of the profile, defaults to 1. Returns -------", "= np.abs(fwhm) if fwhm is not None else np.abs(1.0) self.mu = mu if", "of the profile, defaults to 1. 
Attributes ---------- totalfwhm: float Approximation of the", "Gaussian and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta", "Irrational(Profile): r\"\"\"A callable normalized Irrational profile. Parameters ---------- fwhm: float Full Width At", "arguments supplied. Note ---- The used formula is taken from T. Ida et", "* (self.fwhmG ** 4) * self.fwhmL + 2.42843 * (self.fwhmG ** 3) *", "self.mu = mu if mu is not None else 0.0 self.amp = amp", "the Lorentzian profile in the arguments supplied. Note ---- The formula used is", "class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile. Parameters ---------- fwhm: list of", "1.01579]) self.rho = self.fwhmL / (self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL", "(2 * np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor = 1.0 / (2", "the underlying widths. Returns ------- ExtendedVoigt Callable instance, evaluates the extended Voigt profile", "wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized", "the profile, defaults to 1. Returns ------- Lorentzian Callable instance, evaluates the Lorentzian", "return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ---------- fwhm:", "+ 1j * self.gamma) / (self.sigma * sqrt2) top = wofz(z).real / (self.sigma", "normalized Voigt profile. 
Parameters ---------- fwhm: list of 2 floats Full Width At", "fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.L.fwhm = value", "= self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) **", "Note ---- The formula used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the", "with :math:`w(z)` the Faddeeva function, and the values supplied as FWHM are appropriately", "uses a weighted sum of the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def", "Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and the values supplied as", "fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "+= (1.0 - self.n) * self.G(0) def __call__(self, x): x = x -", "mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "Faddeeva function, and the values supplied as FWHM are appropriately transformed to :math:`\\sigma`", "def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 /", "value): self._fwhm = value self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) + 1))", "= value self.G.fwhm = value if not self.ampIsArea: self._normFactor = self.n * self.L(0)", "value self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) + 1)) if not self.ampIsArea:", "the PhD thesis of <NAME> :cite:`Yordanov2007`. 
This class uses a weighted sum of", "s * self.wH self.nL = self.rho * (1 + (1 - self.rho) *", "* self.fwhmL + \\ (0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2)", "= (self.sigma * sqrt2pi) ** (-1) def __call__(self, x): x = x -", "self.rho) self.nH = np.polyval(h, self.rho) self.wG = s * (1 - self.rho *", "* np.log(2)) base_e = np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False):", "= HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5 + 2.69269 * (self.fwhmG **", "** 2) else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value", "profile, defaults to 1. Attributes ---------- totalfwhm: float Approximation of the width based", "This class uses a weighted sum of the Gaussian, Lorentzian, Irrational and HyperbolicSquared", "super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters ----------", "x - self.mu z = (x + 1j * self.gamma) / (self.sigma *", "* (x / self.sigma) ** 2) normPart = self.sigma * sqrt2pi return super(Gaussian,", "np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor = 1.0 / (2 * self.gamma)", "+ self.gamma ** 2) * np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile):", "(self.fwhmG ** 2) * (self.fwhmL ** 3) + 0.07842 * self.fwhmG * (self.fwhmL", "self.nL - self.nI - self.nH) * self.G(0) Lorentz = self.nL * self.L(0) Irrat", "* sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational profile. Parameters", "self._fwhm = value self.gamma = 0.5 * self.fwhm if not self.ampIsArea: self._normFactor =", "Ida et al. 
:cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`.", "from scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared']", "self.nI self.nH = self.rho * (1 - self.rho) * self.nH self.G = Gaussian(fwhm=self.wG,", "code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. This class uses a", "profile, defaults to 1. Returns ------- Irrational Callable instance, evaluates the irrational profile", "taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately transformed for", "value, value self._fwhm = 0.6144031129489123 * value self.setParams() def setParams(self): a = np.array(", "= 0.5 * self.fwhm if not self.ampIsArea: self._normFactor = 1.0 / (self.gamma *", "def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm =", "thesis of Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2}", "* sqrt2pi) self._normFactor = top def __call__(self, x): x = x - self.mu", "val = Gauss + Lorentz + Irrat + Hyper self._normFactor = val def", "normalized Gaussian profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults", "* self.L(x) Irrat = self.nI * self.I(x) Hyper = self.nH * self.H(x) val", "= self._fwhm / self._fwhmNorm if not self.ampIsArea: z = (0 + 1j *", "of the components, defaults to 1. Ordered as Gaussian, then Lorentzian. 
mu: float", "self.nH * self.H(x) val = Gauss + Lorentz + Irrat + Hyper return", "(1 + (1 - self.rho) * self.nL) self.nI = self.rho * (1 -", "self._normFactor = 1.0 / (2 * self.gamma) def __call__(self, x): x = x", "x - self.mu val = ((1.0 + (x / self.gamma) ** 2) **", "math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None,", "(-1.5)) / (2 * self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized", "simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. Parameters ---------- fwhm: float Full", "= self.nL * self.L(x) Irrat = self.nI * self.I(x) Hyper = self.nH *", "- self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended", "str(type(self)) + 'FWHM: {}, mu: {}, amp: {}' s = s.format(self.fwhm, self.mu, self.amp)", "the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM", "the Voigt profile in the arguments supplied. Note ---- The formula used is", "from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM", "val = ((1.0 + (x / self.gamma) ** 2) ** (-1.5)) / (2", "Attributes ---------- totalfwhm: float Approximation of the total width, based on the underlying", "self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. Parameters ---------- fwhm:", "formula is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. 
math:: G(x;\\mu, \\sigma) &=", "= self.nI * self.I(0) Hyper = self.nH * self.H(0) val = Gauss +", "code inspired by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x;", "z = (0 + 1j * self.gamma) / (self.sigma * sqrt2) top =", "\\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu,", "self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346 * self.fwhmL + \\ np.sqrt(0.2166 *", "callable normalized Gaussian profile. Parameters ---------- fwhm: float Full Width At Half Maximum,", "from scipy.special import wofz from scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt',", "+ (x / self.gamma) ** 2) ** (-1.5)) / (2 * self.gamma) return", "= x - self.mu coshPart = (1.0 / np.cosh(x / self.gamma)) ** 2", "supplied. Note ---- The formula used is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html:", "self.rho) * self.nL) self.nI = self.rho * (1 - self.rho) * self.nI self.nH", "\\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm,", "else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma", "self.nL * self.L(0) Irrat = self.nI * self.I(0) Hyper = self.nH * self.H(0)", "= (self.fwhmG ** 5 + 2.69269 * (self.fwhmG ** 4) * self.fwhmL +", "self.rho = self.fwhmL / (self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL =", "of classes for different lineshapes, creating callables for easy and intuitive calculations. ..", "x = x - self.mu expPart = np.exp(-0.5 * (x / self.sigma) **", "+ Lorentz + Irrat + Hyper self._normFactor = val def __call__(self, x): x", "1. 
Returns ------- Lorentzian Callable instance, evaluates the Lorentzian profile in the arguments", "profile, defaults to 1. Returns ------- Hyperbolic Callable instance, evaluates the hyperbolic profile", "1. Returns ------- Irrational Callable instance, evaluates the irrational profile in the arguments", "self._normFactor = (1.0 ** (-1.5)) / (2 * self.gamma) def __call__(self, x): x", "used is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right)", "if not self.ampIsArea: z = (0 + 1j * self.gamma) / (self.sigma *", "instance, evaluates the irrational profile in the arguments supplied. Note ---- The used", "* self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) def __call__(self, x): x", "self.I(x) Hyper = self.nH * self.H(x) val = Gauss + Lorentz + Irrat", "__all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 **", "__call__(self, x): x = x - self.mu val = self.n * self.L(x) +", "self.fwhmL ** 2 + self.fwhmG ** 2) else: self.fwhmG, self.fwhmL = value, value", "= np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH =", "__init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "the Gaussian profile in the arguments supplied. Note ---- The used formula is", "evaluates the hyperbolic profile in the arguments supplied. 
Note ---- The used formula", "if eta is not None else 0.5 if self._n > 1: self._n =", "= x - self.mu expPart = np.exp(-0.5 * (x / self.sigma) ** 2)", "self.gamma ** 2) * np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A", "self._fwhm / self._fwhmNorm if not self.ampIsArea: z = (0 + 1j * self.gamma)", "-24.10743, 9.3155, -1.38927, -0.30165]) g = np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107,", "self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV", "\\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2])", "mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "normalized Irrational profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults", "= (1 - self.nL - self.nI - self.nH) * self.G(x) Lorentz = self.nL", "topPart = self.gamma bottomPart = (x ** 2 + self.gamma ** 2) *", "Lorentzian width. mu: float Location of the center, defaults to 0. 
amp: float", "ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if fwhm is not", "= np.abs(eta) if eta is not None else 0.5 if self._n > 1:", "value): self._fwhm = value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3) -", "self.gamma) / (self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) self._normFactor", "\\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x;", "0.5 if self._n > 1: self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu,", "__init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "evaluates the pseudovoigt profile in the arguments supplied. Note ---- The formula used", "formula is taken from T. Ida et al. :cite:`Ida2000`, code inspired by the", "al. :cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. .. math::", "self.n * self.L(x) + (1.0 - self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class", "= np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea", "&= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331,", "values supplied as FWHM are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: .. math::", ":math:`\\sigma` and :math:`\\gamma`: .. 
math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def", "self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.setParams() def setParams(self): a", "/ np.cosh(x / self.gamma)) ** 2 simplePart = 2 * self.gamma return super(HyperbolicSquared,", "+ 4.47163 * (self.fwhmG ** 2) * (self.fwhmL ** 3) + 0.07842 *", "> 1: self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "self.rho * (1 - self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L =", "&= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2,", "amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta) if eta", "sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile.", "self.fwhmG * (self.fwhmL ** 4) + self.fwhmL ** 5 ) ** (1.0 /", "not None else np.abs(1.0) self.mu = mu if mu is not None else", "calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as", "= self.n * self.L(x) + (1.0 - self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val)", "import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 =", ":cite:`Yordanov2007`. 
This class uses a weighted sum of the Gaussian, Lorentzian, Irrational and", "list of 2 floats Full Width At Half Maximum of the components, defaults", "Irrat = self.nI * self.I(0) Hyper = self.nH * self.H(0) val = Gauss", "**kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta) if eta is", "------- Voigt Callable instance, evaluates the Voigt profile in the arguments supplied. Note", "self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) @property def n(self): return self._n", "4) + self.fwhmL ** 5 ) ** (1.0 / 5) if not self.ampIsArea:", "taken from T. Ida et al. :cite:`Ida2000`, code inspired by the PhD thesis", "/ simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. Parameters ---------- fwhm: float", "* self.G(0) Lorentz = self.nL * self.L(0) Irrat = self.nI * self.I(0) Hyper", "None else 1.0 def __repr__(self): s = str(type(self)) + 'FWHM: {}, mu: {},", "= x - self.mu topPart = self.gamma bottomPart = (x ** 2 +", "/ (sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) ** (-1) def", "\\ (0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) ** 0.5 self.sigma,", "Hyper = self.nH * self.H(x) val = Gauss + Lorentz + Irrat +", "not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) ** (-1) def __call__(self, x): x", "self.fwhmL + \\ (0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) **", "self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / (2", "(1.0 - self.n) * self.G(0) @property def n(self): return self._n @n.setter def n(self,", "(self.fwhmL ** 4) + self.fwhmL ** 5 ) ** (1.0 / 5) if", "= self.rho * (1 + (1 - self.rho) * self.nL) self.nI = self.rho", "1) if not self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) / (2 * self.gamma)", "<reponame>woutergins/polarization<filename>polarization/profiles.py \"\"\" .. 
module:: profiles :platform: Windows :synopsis: Implementation of classes for different", "numpy as np from scipy.special import wofz from scipy.interpolate import interp1d __all__ =", "to 1. Returns ------- Lorentzian Callable instance, evaluates the Lorentzian profile in the", "/ (2 * self.gamma) def __call__(self, x): x = x - self.mu val", "* self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters", "{\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu,", "** (-1.5)) / (2 * self.gamma) def __call__(self, x): x = x -", "defaults to 1. Returns ------- PseudoVoigt Callable instance, evaluates the pseudovoigt profile in", "Irrat = self.nI * self.I(x) Hyper = self.nH * self.H(x) val = Gauss", "sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile):", "= value, value self._fwhm = 0.6144031129489123 * value self.setParams() def setParams(self): a =", "value self.sigma, self.gamma = self._fwhm / self._fwhmNorm if not self.ampIsArea: z = (0", "Maximum, defaults to 1, ordered as Gaussian and Lorentzian width. mu: float Location", "'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi =", "profile, defaults to 1. Returns ------- PseudoVoigt Callable instance, evaluates the pseudovoigt profile", "------- PseudoVoigt Callable instance, evaluates the pseudovoigt profile in the arguments supplied. Note", "= self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "The used formula is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. 
math:: G(x;\\mu,", "np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559,", "x - self.mu topPart = self.gamma bottomPart = (x ** 2 + self.gamma", "the supplied FWHM is appropriately transformed for the Gaussian and Lorentzian lineshapes: ..", "= self.nI * self.I(x) Hyper = self.nH * self.H(x) val = Gauss +", "formula used is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva", "self.sigma, self.gamma = seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value self._fwhm", "- self.mu z = (x + 1j * self.gamma) / (self.sigma * sqrt2)", "Lorentzian profile in the arguments supplied. Note ---- The formula used is taken", "math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None,", "fwhm(self, value): self._fwhm = value self.L.fwhm = value self.G.fwhm = value if not", "self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / np.sqrt(np.power(2,", ":math:`w(z)` the Faddeeva function, and the values supplied as FWHM are appropriately transformed", "2 simplePart = 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile):", "value if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor += (1.0 -", "PseudoVoigt profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to", "\\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\"", "self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) def __call__(self, x):", "the arguments supplied. 
Note ---- The formula used is taken from the Wikipedia", "self.wG = s * (1 - self.rho * self.wG) self.wL = s *", "tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate G, L = seperate", "0.66]) b = np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c =", "self.G(0) @property def n(self): return self._n @n.setter def n(self, value): value = np.abs(value)", "amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", "0. amp: float Amplitude of the profile, defaults to 1. Attributes ---------- totalfwhm:", "floats Full Width At Half Maximum, defaults to 1, ordered as Gaussian and", "(x / self.gamma) ** 2) ** (-1.5)) / (2 * self.gamma) return super(Irrational,", "instance, evaluates the Gaussian profile in the arguments supplied. Note ---- The used", "x): x = x - self.mu z = (x + 1j * self.gamma)", "Callable instance, evaluates the extended Voigt profile in the arguments supplied. Note ----", "np.abs(eta) if eta is not None else 0.5 if self._n > 1: self._n", "Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L} (x;", "def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", "http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def", "Ida et al. :cite:`Ida2000`, code inspired by the PhD thesis of Deyan Yordanov", "callable normalized HyperbolicSquared profile. Parameters ---------- fwhm: float Full Width At Half Maximum,", "class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. 
Parameters ---------- fwhm: float Full Width", "self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.setParams() def setParams(self):", "and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) = \\eta \\mathcal{L}", "the arguments supplied. Note ---- The formula used is taken from the webpage", "Hyper = self.nH * self.H(0) val = Gauss + Lorentz + Irrat +", "width. mu: float Location of the center, defaults to 0. amp: float Amplitude", "** 2) * (self.fwhmL ** 3) + 0.07842 * self.fwhmG * (self.fwhmL **", "bottomPart = (x ** 2 + self.gamma ** 2) * np.pi return super(Lorentzian,", "import wofz from scipy.interpolate import interp1d __all__ = ['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt',", "__init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "= np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "/ (self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI", "and the supplied FWHM is appropriately transformed for the Gaussian and Lorentzian lineshapes:", "**kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "(0 + 1j * self.gamma) / (self.sigma * sqrt2) top = wofz(z).real /", "to :math:`\\sigma` and :math:`\\gamma`: .. 
math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\"", "/ self.gamma)) ** 2 simplePart = 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart /", "\\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G", "\\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None,", "float Full Width At Half Maximum, defaults to 1. mu: float Location of", "T. Ida et al. :cite:`Ida2000`, code inspired by the PhD thesis of <NAME>", "= 1.0 else: factor = self._normFactor vals = vals / factor return self.amp", "32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL + self.fwhmG)", "- self.mu Gauss = (1 - self.nL - self.nI - self.nH) * self.G(x)", "by the PhD thesis of <NAME> :cite:`Yordanov2007`. This class uses a weighted sum", "profile. 
Parameters ---------- fwhm: list of 2 floats Full Width At Half Maximum", "= Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV =", "+ self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c,", "class uses a weighted sum of the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\"", "np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346 * self.fwhmL", "(2 * self.gamma) def __call__(self, x): x = x - self.mu coshPart =", ":synopsis: Implementation of classes for different lineshapes, creating callables for easy and intuitive", "self.nL = self.rho * (1 + (1 - self.rho) * self.nL) self.nI =", "2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "evaluates the Gaussian profile in the arguments supplied. Note ---- The used formula", "of 2 floats Full Width At Half Maximum, defaults to 1, ordered as", "fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = 0.5", "seperate = value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346 * self.fwhmL +", "self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I =", "from T. Ida et al. :cite:`Ida2000`, code inspired by the PhD thesis of", "* (self.fwhmL ** 3) + 0.07842 * self.fwhmG * (self.fwhmL ** 4) +", "the arguments supplied. Note ---- Formula taken from <NAME> et al. :cite:`Ida2000`, code", "Returns ------- Gaussian Callable instance, evaluates the Gaussian profile in the arguments supplied.", "0. amp: float Amplitude of the profile, defaults to 1. 
Returns ------- PseudoVoigt", "amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", ".. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self,", "= self.rho * (1 - self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L", "Returns ------- PseudoVoigt Callable instance, evaluates the pseudovoigt profile in the arguments supplied.", "0. amp: float Amplitude of the profile, defaults to 1. Returns ------- Gaussian", "(1 - self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs)", "value): value = np.abs(value) if value > 1: value = value - int(value)", "the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &=", "HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm,", "self._fwhm = value self.sigma = self.fwhm / (sqrt2log2t2) if not self.ampIsArea: self._normFactor =", "[9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134,", "= (x ** 2 + self.gamma ** 2) * np.pi return super(Lorentzian, self).__call__(topPart", "value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346 * self.fwhmL + \\ np.sqrt(0.2166", "= wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable", "- int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", "np.abs(value) if value > 1: value = value - int(value) self._n = value", "(x + 1j * 
self.gamma) / (self.sigma * sqrt2) top = wofz(z).real /", "0.07842 * self.fwhmG * (self.fwhmL ** 4) + self.fwhmL ** 5 ) **", "<NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def", "self._n > 1: self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "** 3) + 0.07842 * self.fwhmG * (self.fwhmL ** 4) + self.fwhmL **", "(self.fwhmL + self.fwhmG) self.wG = np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI =", "= \\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self,", "self.rho * (1 + (1 - self.rho) * self.nL) self.nI = self.rho *", "@fwhm.setter def fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG,", "**kwargs): super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "self.nI = self.rho * (1 - self.rho) * self.nI self.nH = self.rho *", "** 0.5 self.sigma, self.gamma = seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL = value,", "= s.format(self.fwhm, self.mu, self.amp) return s def __call__(self, vals): if self.ampIsArea: factor =", "* self.fwhmG * (self.fwhmL ** 4) + self.fwhmL ** 5 ) ** (1.0", "super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt profile. Parameters ----------", "'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi = (2", "http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and the values supplied as FWHM are", "/ normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters ---------- fwhm: float", "<NAME> <<EMAIL>> .. 
moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as np from scipy.special", "(self.fwhmL ** 3) + 0.07842 * self.fwhmG * (self.fwhmL ** 4) + self.fwhmL", "used formula is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math:: G(x;\\mu, \\sigma)", "(list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346", "(-1.5)) / (2 * self.gamma) def __call__(self, x): x = x - self.mu", "0. amp: float Amplitude of the profile, defaults to 1. Returns ------- Hyperbolic", "float Amplitude of the profile, defaults to 1. Returns ------- PseudoVoigt Callable instance,", "x): x = x - self.mu val = self.n * self.L(x) + (1.0", "**kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value, (list,", "__init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n", "10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913])", "webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately transformed for the Gaussian and", "G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs)", "self.G.fwhm = value if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor +=", "\\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm =", "**kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "**self.kwargs) self.fwhmV = (self.fwhmG ** 5 + 2.69269 * (self.fwhmG ** 4) *", "-1.24984, 0.15021, 0.66]) b = np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, 
-1.25693, -0.42179])", "self.ampIsArea: Gauss = (1 - self.nL - self.nI - self.nH) * self.G(0) Lorentz", "** 2) normPart = self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class", "supplied. Note ---- The used formula is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html:", "(x ** 2 + self.gamma ** 2) * np.pi return super(Lorentzian, self).__call__(topPart /", "sqrt2pi) self._normFactor = top def __call__(self, x): x = x - self.mu z", "self.mu topPart = self.gamma bottomPart = (x ** 2 + self.gamma ** 2)", "fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if", "normalized HyperbolicSquared profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults", "fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "= seperate self._fwhm = 0.5346 * self.fwhmL + \\ (0.2166 * self.fwhmL **", "__repr__(self): s = str(type(self)) + 'FWHM: {}, mu: {}, amp: {}' s =", "s = str(type(self)) + 'FWHM: {}, mu: {}, amp: {}' s = s.format(self.fwhm,", "* self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt", "return self._n @n.setter def n(self, value): value = np.abs(value) if value > 1:", "= x - self.mu z = (x + 1j * self.gamma) / (self.sigma", "/ np.sqrt(np.power(2, 2.0 / 3) - 1) if not self.ampIsArea: self._normFactor = (1.0", "else 0.0 self.amp = amp if amp is not None else 1.0 def", "Voigt profile. 
Parameters ---------- fwhm: list of 2 floats Full Width At Half", "(1 - self.rho * self.wG) self.wL = s * (1 - (1 -", "\\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu,", "self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor", "simplePart = 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A", "Approximation of the width based on the underlying widths. Returns ------- Voigt Callable", "value self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma = self._fwhm / self._fwhmNorm if", "2) * np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable normalized", "HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5 + 2.69269 * (self.fwhmG ** 4)", "Lorentz = self.nL * self.L(0) Irrat = self.nI * self.I(0) Hyper = self.nH", "\\gamma\\right) = \\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def", "callables for easy and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME>", "taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and the", "sqrt2 = 2 ** 0.5 sqrt2pi = (2 * np.pi) ** 0.5 sqrt2log2t2", "self.L(x) + (1.0 - self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A", "self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile.", "<NAME> :cite:`Yordanov2007`. 
This class uses a weighted sum of the Gaussian, Lorentzian, Irrational", "is not None else np.abs(1.0) self.mu = mu if mu is not None", "self.ampIsArea: self._normFactor = 1.0 / (self.gamma * np.pi) def __call__(self, x): x =", "(self.gamma * np.pi) def __call__(self, x): x = x - self.mu topPart =", "HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ---------- fwhm: float Full Width At", "-0.68688, -0.47745, 1.10186]) f = np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165])", "module:: profiles :platform: Windows :synopsis: Implementation of classes for different lineshapes, creating callables", "self._normFactor = top def __call__(self, x): x = x - self.mu z =", "/ factor return self.amp * vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile.", "super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ---------- fwhm: float", "'FWHM: {}, mu: {}, amp: {}' s = s.format(self.fwhm, self.mu, self.amp) return s", "* self.gamma) def __call__(self, x): x = x - self.mu coshPart = (1.0", "self.wH = s * self.wH self.nL = self.rho * (1 + (1 -", "def fwhm(self, value): self._fwhm = value self.sigma = self.fwhm / (sqrt2log2t2) if not", "ordered as Gaussian and Lorentzian width. mu: float Location of the center, defaults", "moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as np from", "not self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) / (2 * self.gamma) def __call__(self,", "2 * np.sqrt(2 * np.log(2)) base_e = np.exp(1) class Profile(object): def __init__(self, fwhm=None,", "irrational profile in the arguments supplied. 
Note ---- The used formula is taken", "self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1) if not self.ampIsArea: self._normFactor =", "= value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346 * self.fwhmL + \\", "is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &=", "= (0 + 1j * self.gamma) / (self.sigma * sqrt2) top = wofz(z).real", "normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters ---------- fwhm: float Full", "def __call__(self, x): x = x - self.mu val = ((1.0 + (x", "2 ** 0.5 sqrt2pi = (2 * np.pi) ** 0.5 sqrt2log2t2 = 2", "** 4) * self.fwhmL + 2.42843 * (self.fwhmG ** 3) * (self.fwhmL **", "None else 0.5 if self._n > 1: self._n = self._n - int(self._n) super(PseudoVoigt,", "[-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b = np.array( [3.19974, -16.50453, 29.14158,", ".. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self,", "and the values supplied as FWHM are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`:", "eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n =", "self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0)", "* value self.sigma, self.gamma = self._fwhm / self._fwhmNorm if not self.ampIsArea: z =", "(1.0 ** (-1.5)) / (2 * self.gamma) def __call__(self, x): x = x", "Half Maximum, defaults to 1, ordered as Gaussian and Lorentzian width. 
mu: float", "np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475,", "self._n @n.setter def n(self, value): value = np.abs(value) if value > 1: value", "** 2) * np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable", "self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) ** (-1) def __call__(self, x): x =", "mu: float Location of the center, defaults to 0. amp: float Amplitude of", "value, value self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma = self._fwhm / self._fwhmNorm", "* vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. Parameters ---------- fwhm: float", "Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta) if eta is not None else", ":cite:`Ida2000`, code inspired by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. .. math::", "\\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G =", "the profile, defaults to 1. Returns ------- PseudoVoigt Callable instance, evaluates the pseudovoigt", "extended Voigt profile. Parameters ---------- fwhm: list of 2 floats Full Width At", "@fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / (2 *", "of the profile, defaults to 1. Returns ------- Gaussian Callable instance, evaluates the", "float Location of the center, defaults to 0. 
amp: float Amplitude of the", "if not self.ampIsArea: Gauss = (1 - self.nL - self.nI - self.nH) *", "of 2 floats Full Width At Half Maximum of the components, defaults to", "= np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG = s * (1 -", "0.5 sqrt2pi = (2 * np.pi) ** 0.5 sqrt2log2t2 = 2 * np.sqrt(2", "**self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG **", "= x - self.mu val = ((1.0 + (x / self.gamma) ** 2)", "= self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable", "of the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None,", "x = x - self.mu val = ((1.0 + (x / self.gamma) **", "super(Gaussian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "-0.47745, 1.10186]) f = np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g", "---------- fwhm: list of 2 floats Full Width At Half Maximum of the", "eta is not None else 0.5 if self._n > 1: self._n = self._n", "0.5 sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2)) base_e = np.exp(1) class Profile(object):", "thesis of <NAME> :cite:`Yordanov2007`. This class uses a weighted sum of the Gaussian,", "fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.sigma = self.fwhm", "the profile, defaults to 1. Returns ------- Irrational Callable instance, evaluates the irrational", "= self.rho * (1 - self.rho) * self.nI self.nH = self.rho * (1", "transformed to :math:`\\sigma` and :math:`\\gamma`: .. 
math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}}", "self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.L.fwhm = value self.G.fwhm =", "taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma}", "1. mu: float Location of the center, defaults to 0. amp: float Amplitude", "self.wI = s * self.wI self.wH = s * self.wH self.nL = self.rho", "of the profile, defaults to 1. Returns ------- Irrational Callable instance, evaluates the", "the PhD thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right)", "self.nH = self.rho * (1 - self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs)", "(self.sigma * sqrt2pi) self._normFactor = top def __call__(self, x): x = x -", "np.pi return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt profile.", "(1 - self.nL - self.nI - self.nH) * self.G(x) Lorentz = self.nL *", "= value if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor += (1.0", "/ self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value", "0.5 * self.fwhm if not self.ampIsArea: self._normFactor = 1.0 / (self.gamma * np.pi)", "if not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) ** (-1) def __call__(self, x):", "Amplitude of the profile, defaults to 1. Returns ------- Lorentzian Callable instance, evaluates", "Note ---- Formula taken from <NAME> et al. 
:cite:`Ida2000`, code inspired by the", "mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "self.fwhm if not self.ampIsArea: self._normFactor = 1.0 / (self.gamma * np.pi) def __call__(self,", "the irrational profile in the arguments supplied. Note ---- The used formula is", "fwhm(self, value): self._fwhm = value self.sigma = self.fwhm / (sqrt2log2t2) if not self.ampIsArea:", "def __call__(self, vals): if self.ampIsArea: factor = 1.0 else: factor = self._normFactor vals", "ampIsArea self.fwhm = np.abs(fwhm) if fwhm is not None else np.abs(1.0) self.mu =", "np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea =", "weighted sum of the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None,", "- self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I", "= np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL /", "callable normalized PseudoVoigt profile. Parameters ---------- fwhm: float Full Width At Half Maximum,", "= s * (1 - (1 - self.rho) * self.wL) self.wI = s", "the pseudovoigt profile in the arguments supplied. Note ---- The formula used is", "* self.fwhm if not self.ampIsArea: self._normFactor = 1.0 / (self.gamma * np.pi) def", "else 0.5 if self._n > 1: self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm,", "s * self.wI self.wH = s * self.wH self.nL = self.rho * (1", "class Voigt(Profile): r\"\"\"A callable normalized Voigt profile. Parameters ---------- fwhm: list of 2", "Amplitude of the profile, defaults to 1. 
Returns ------- Irrational Callable instance, evaluates", "= self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) @property def", "float Amplitude of the profile, defaults to 1. Returns ------- Lorentzian Callable instance,", "self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma = self._fwhm", "fwhm(self, value): self._fwhm = value self.gamma = 0.5 * self.fwhm if not self.ampIsArea:", "float Amplitude of the profile, defaults to 1. Attributes ---------- totalfwhm: float Approximation", "are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: .. math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &=", "super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "Half Maximum, defaults to 1. mu: float Location of the center, defaults to", "self.mu coshPart = (1.0 / np.cosh(x / self.gamma)) ** 2 simplePart = 2", "__call__(self, x): x = x - self.mu topPart = self.gamma bottomPart = (x", "= wofz(z).real / (self.sigma * sqrt2pi) self._normFactor = top def __call__(self, x): x", "webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and the values supplied as FWHM", "self.rho) self.wG = s * (1 - self.rho * self.wG) self.wL = s", "\\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property", "0.6144031129489123 * value self.setParams() def setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052,", "super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "np.cosh(x / self.gamma)) ** 2 simplePart = 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart", "r\"\"\"A callable normalized Gaussian profile. 
Parameters ---------- fwhm: float Full Width At Half", "self._n = self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "** (-1.5)) / (2 * self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable", "not self.ampIsArea: self._normFactor = 1.0 / (2 * self.gamma) def __call__(self, x): x", ".. module:: profiles :platform: Windows :synopsis: Implementation of classes for different lineshapes, creating", "* (self.fwhmG ** 3) * (self.fwhmL ** 2) + 4.47163 * (self.fwhmG **", "= Gauss + Lorentz + Irrat + Hyper self._normFactor = val def __call__(self,", "------- Hyperbolic Callable instance, evaluates the hyperbolic profile in the arguments supplied. Note", "self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH", "-1.38927, -0.30165]) g = np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h", "= np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g = np.array( [9.76947,", "defaults to 1. Returns ------- Irrational Callable instance, evaluates the irrational profile in", "mu is not None else 0.0 self.amp = amp if amp is not", "if not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor += (1.0 - self.n)", "self.nL - self.nI - self.nH) * self.G(x) Lorentz = self.nL * self.L(x) Irrat", "** 5 + 2.69269 * (self.fwhmG ** 4) * self.fwhmL + 2.42843 *", "the hyperbolic profile in the arguments supplied. Note ---- The used formula is", "= 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable", "34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g = np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653,", "Note ---- The used formula is taken from T. Ida et al. :cite:`Ida2000`,", "normalized extended Voigt profile. 
Parameters ---------- fwhm: list of 2 floats Full Width", "self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) @property def n(self):", "{}, mu: {}, amp: {}' s = s.format(self.fwhm, self.mu, self.amp) return s def", "Returns ------- Voigt Callable instance, evaluates the Voigt profile in the arguments supplied.", "---- The formula used is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math::", "<NAME> et al. :cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`.", "of Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM", "Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. Parameters ---------- fwhm: float Full Width At", "---------- totalfwhm: float Approximation of the width based on the underlying widths. Returns", "0.15021, 0.66]) b = np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c", "* self.I(0) Hyper = self.nH * self.H(0) val = Gauss + Lorentz +", "to 1. mu: float Location of the center, defaults to 0. amp: float", "**kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "self.gamma) / (self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) return", "(self.fwhmL ** 2) + 4.47163 * (self.fwhmG ** 2) * (self.fwhmL ** 3)", "Irrat + Hyper self._normFactor = val def __call__(self, x): x = x -", "value[0:2] self.fwhmG, self.fwhmL = seperate G, L = seperate self._fwhm = 0.5346 *", "= value self.gamma = self.fwhm / (2 * np.log(np.sqrt(2) + 1)) if not", "Half Maximum of the components, defaults to 1. Ordered as Gaussian, then Lorentzian.", "Maximum of the components, defaults to 1. Ordered as Gaussian, then Lorentzian. 
mu:", "{}' s = s.format(self.fwhm, self.mu, self.amp) return s def __call__(self, vals): if self.ampIsArea:", "not self.ampIsArea: Gauss = (1 - self.nL - self.nI - self.nH) * self.G(0)", "self.G(0) def __call__(self, x): x = x - self.mu val = self.n *", "Full Width At Half Maximum, defaults to 1, ordered as Gaussian and Lorentzian", "4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f = np.array( [3.7029, -21.18862, 34.96491, -24.10743,", "self.I(0) Hyper = self.nH * self.H(0) val = Gauss + Lorentz + Irrat", "fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm", "- self.mu coshPart = (1.0 / np.cosh(x / self.gamma)) ** 2 simplePart =", "+ 2.42843 * (self.fwhmG ** 3) * (self.fwhmL ** 2) + 4.47163 *", "0.6144031129489123 * value self.sigma, self.gamma = self._fwhm / self._fwhmNorm if not self.ampIsArea: z", "Full Width At Half Maximum of the components, defaults to 1. Ordered as", "* self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI,", "callable normalized extended Voigt profile. Parameters ---------- fwhm: list of 2 floats Full", "self.wI self.wH = s * self.wH self.nL = self.rho * (1 + (1", "center, defaults to 0. amp: float Amplitude of the profile, defaults to 1.", "self.gamma bottomPart = (x ** 2 + self.gamma ** 2) * np.pi return", "= self._normFactor vals = vals / factor return self.amp * vals class Gaussian(Profile):", "def fwhm(self, value): self._fwhm = value self.gamma = 0.5 * self.fwhm if not", "2.0 / 3) - 1) if not self.ampIsArea: self._normFactor = (1.0 ** (-1.5))", "FWHM is appropriately transformed for the Gaussian and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x;", "appropriately transformed for the Gaussian and Lorentzian lineshapes: .. 
math:: \\mathcal{V}\\left(x; \\mu, \\eta,", "@fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0", "the arguments supplied. Note ---- The used formula is taken from the MathWorld", "-0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho", "'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi = (2 * np.pi) ** 0.5", "= seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123", "* self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile.", "L = seperate self._fwhm = 0.5346 * self.fwhmL + \\ (0.2166 * self.fwhmL", "\"\"\" .. module:: profiles :platform: Windows :synopsis: Implementation of classes for different lineshapes,", "return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = 0.5 *", "supplied FWHM is appropriately transformed for the Gaussian and Lorentzian lineshapes: .. math::", "self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma = self._fwhm / self._fwhmNorm if not", "self.rho) self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho)", "g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):", "Hyper self._normFactor = val def __call__(self, x): x = x - self.mu Gauss", "/ self._fwhmNorm if not self.ampIsArea: z = (0 + 1j * self.gamma) /", "* self.gamma) def __call__(self, x): x = x - self.mu val = ((1.0", "taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. 
math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma}", "mu=None, amp=None, ampIsArea=False): super(Profile, self).__init__() self.ampIsArea = ampIsArea self.fwhm = np.abs(fwhm) if fwhm", "x - self.mu coshPart = (1.0 / np.cosh(x / self.gamma)) ** 2 simplePart", "= val def __call__(self, x): x = x - self.mu Gauss = (1", "0.5346 * self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL ** 2 + self.fwhmG **", "self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.sigma,", "---- The formula used is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)`", "sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) self._normFactor = top def __call__(self,", "math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None,", "= np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571,", "\\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None,", "* sqrt2pi) ** (-1) def __call__(self, x): x = x - self.mu expPart", "x): x = x - self.mu val = ((1.0 + (x / self.gamma)", "self.wL = s * (1 - (1 - self.rho) * self.wL) self.wI =", "as Gaussian, then Lorentzian. mu: float Location of the center, defaults to 0.", "self._normFactor = self.n * self.L(0) self._normFactor += (1.0 - self.n) * self.G(0) @property", "self.rho) self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG = s *", "self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs)", "return self.amp * vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. 
Parameters ----------", "Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters ---------- fwhm: float Full Width At", "\\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None,", "(self.fwhmG ** 3) * (self.fwhmL ** 2) + 4.47163 * (self.fwhmG ** 2)", "/ (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational", ".. math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None,", "self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "normalized Lorentzian profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults", "self.H(x) val = Gauss + Lorentz + Irrat + Hyper return super(ExtendedVoigt, self).__call__(val)", "Callable instance, evaluates the Gaussian profile in the arguments supplied. Note ---- The", "= str(type(self)) + 'FWHM: {}, mu: {}, amp: {}' s = s.format(self.fwhm, self.mu,", "def __call__(self, x): x = x - self.mu coshPart = (1.0 / np.cosh(x", ":cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self,", "&= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "normalized PseudoVoigt profile. 
Parameters ---------- fwhm: float Full Width At Half Maximum, defaults", "- self.rho * self.wG) self.wL = s * (1 - (1 - self.rho)", "= vals / factor return self.amp * vals class Gaussian(Profile): r\"\"\"A callable normalized", "is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and", "[3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g = np.array( [9.76947, -24.12407, 22.10544,", "(1.0 / np.cosh(x / self.gamma)) ** 2 simplePart = 2 * self.gamma return", "Location of the center, defaults to 0. amp: float Amplitude of the profile,", "= np.polyval(h, self.rho) self.wG = s * (1 - self.rho * self.wG) self.wL", "transformed for the Gaussian and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma,", "profile, defaults to 1. Attributes ---------- totalfwhm: float Approximation of the total width,", "np.abs(fwhm) if fwhm is not None else np.abs(1.0) self.mu = mu if mu", "Width At Half Maximum, defaults to 1, ordered as Gaussian and Lorentzian width.", "&= 2g\\ln\\left(\\sqrt{2}+1\\right)\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "from <NAME> et al. 
:cite:`Ida2000`, code inspired by the PhD thesis of <NAME>", "(1.0 - self.n) * self.G(0) def __call__(self, x): x = x - self.mu", "sqrt2pi = (2 * np.pi) ** 0.5 sqrt2log2t2 = 2 * np.sqrt(2 *", "self.nH = np.polyval(h, self.rho) self.wG = s * (1 - self.rho * self.wG)", "def n(self, value): value = np.abs(value) if value > 1: value = value", "(self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) self._normFactor = top", "seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 *", "sum of the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None,", "is not None else 0.0 self.amp = amp if amp is not None", "self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma = 0.5 * self.fwhm", "(x / self.sigma) ** 2) normPart = self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart", "** 2 + self.gamma ** 2) * np.pi return super(Lorentzian, self).__call__(topPart / bottomPart)", "Note ---- The formula used is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with", "callable normalized Voigt profile. Parameters ---------- fwhm: list of 2 floats Full Width", "to 1. Returns ------- Irrational Callable instance, evaluates the irrational profile in the", "2) normPart = self.sigma * sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile):", "__call__(self, x): x = x - self.mu coshPart = (1.0 / np.cosh(x /", "self.gamma)) ** 2 simplePart = 2 * self.gamma return super(HyperbolicSquared, self).__call__(coshPart / simplePart)", "1. Ordered as Gaussian, then Lorentzian. mu: float Location of the center, defaults", "Gaussian Callable instance, evaluates the Gaussian profile in the arguments supplied. 
Note ----", "= 0.6144031129489123 * value self.sigma, self.gamma = self._fwhm / self._fwhmNorm if not self.ampIsArea:", "vals): if self.ampIsArea: factor = 1.0 else: factor = self._normFactor vals = vals", "s def __call__(self, vals): if self.ampIsArea: factor = 1.0 else: factor = self._normFactor", "@property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value, (list, tuple,", "of <NAME> :cite:`Yordanov2007`. This class uses a weighted sum of the Gaussian, Lorentzian,", "/ (2 * self.gamma) def __call__(self, x): x = x - self.mu coshPart", "np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d,", "Voigt(Profile): r\"\"\"A callable normalized Voigt profile. Parameters ---------- fwhm: list of 2 floats", "* self.wH self.nL = self.rho * (1 + (1 - self.rho) * self.nL)", "the profile, defaults to 1. Attributes ---------- totalfwhm: float Approximation of the width", "\\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt,", "fwhm=None, mu=None, amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "@property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.sigma", "def n(self): return self._n @n.setter def n(self, value): value = np.abs(value) if value", "Width At Half Maximum, defaults to 1. 
mu: float Location of the center,", "self._normFactor += (1.0 - self.n) * self.G(0) def __call__(self, x): x = x", "The formula used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM", "Gauss = (1 - self.nL - self.nI - self.nH) * self.G(0) Lorentz =", "4.74052, -1.24984, 0.15021, 0.66]) b = np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693,", "b = np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c = np.array(", "---------- totalfwhm: float Approximation of the total width, based on the underlying widths.", "Voigt profile in the arguments supplied. Note ---- Formula taken from <NAME> et", "self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile. Parameters ---------- fwhm: list", "profile in the arguments supplied. Note ---- Formula taken from <NAME> et al.", "self._normFactor = val def __call__(self, x): x = x - self.mu Gauss =", "amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value,", "/ bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt profile. Parameters ---------- fwhm: list", "(1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L =", "The formula used is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. math:: \\mathcal{L}\\left(x;", "float Approximation of the total width, based on the underlying widths. Returns -------", "fwhm: list of 2 floats Full Width At Half Maximum of the components,", "@property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.L.fwhm", "---------- fwhm: float Full Width At Half Maximum, defaults to 1. 
mu: float", "* self.L(x) + (1.0 - self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile):", "Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs", "self.rho) self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho)", "self.fwhm = np.abs(fwhm) if fwhm is not None else np.abs(1.0) self.mu = mu", "{}, amp: {}' s = s.format(self.fwhm, self.mu, self.amp) return s def __call__(self, vals):", "super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "to 1. Returns ------- Gaussian Callable instance, evaluates the Gaussian profile in the", "- self.nL - self.nI - self.nH) * self.G(x) Lorentz = self.nL * self.L(x)", "= np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL =", "defaults to 0. amp: float Amplitude of the profile, defaults to 1. Attributes", "Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG", "self.setParams() def setParams(self): a = np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66])", "5 + 2.69269 * (self.fwhmG ** 4) * self.fwhmL + 2.42843 * (self.fwhmG", "float Approximation of the width based on the underlying widths. Returns ------- Voigt", "2 + self.fwhmG ** 2) else: self.fwhmG, self.fwhmL = value, value self._fwhm =", "wofz(z).real / (self.sigma * sqrt2pi) self._normFactor = top def __call__(self, x): x =", "self.nL) self.nI = self.rho * (1 - self.rho) * self.nI self.nH = self.rho", "from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. 
math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)}", "'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi = (2 * np.pi) **", "0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho =", "self.H = HyperbolicSquared(fwhm=self.wH, **self.kwargs) self.fwhmV = (self.fwhmG ** 5 + 2.69269 * (self.fwhmG", "self.rho * (1 - self.rho) * self.nI self.nH = self.rho * (1 -", "= ampIsArea self.fwhm = np.abs(fwhm) if fwhm is not None else np.abs(1.0) self.mu", "self._fwhm = value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1)", "* sqrt2pi return super(Gaussian, self).__call__(expPart / normPart) class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian", "Amplitude of the profile, defaults to 1. Returns ------- Hyperbolic Callable instance, evaluates", "return self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)): seperate =", "(1 - self.nL - self.nI - self.nH) * self.G(0) Lorentz = self.nL *", "widths. Returns ------- ExtendedVoigt Callable instance, evaluates the extended Voigt profile in the", "profile in the arguments supplied. Note ---- The used formula is taken from", "not None else 0.0 self.amp = amp if amp is not None else", "\\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm,", "0. amp: float Amplitude of the profile, defaults to 1. Returns ------- Irrational", "1.10186]) f = np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155, -1.38927, -0.30165]) g =", "super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. 
Parameters ----------", "&= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational,", "list of 2 floats Full Width At Half Maximum, defaults to 1, ordered", "amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm =", "* (self.fwhmL ** 2) + 4.47163 * (self.fwhmG ** 2) * (self.fwhmL **", "classes for different lineshapes, creating callables for easy and intuitive calculations. .. moduleauthor::", "self.n) * self.G(0) @property def n(self): return self._n @n.setter def n(self, value): value", "def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", "defaults to 1. Attributes ---------- totalfwhm: float Approximation of the total width, based", "= np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG =", "def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.gamma =", "= seperate self._fwhm = 0.5346 * self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL **", "float Amplitude of the profile, defaults to 1. Returns ------- Gaussian Callable instance,", "amp=None, **kwargs): super(HyperbolicSquared, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", "d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f = np.array(", "T. Ida et al. :cite:`Ida2000`, code inspired by the PhD thesis of Deyan", "<NAME> <<EMAIL>> \"\"\" import numpy as np from scipy.special import wofz from scipy.interpolate", "the components, defaults to 1. Ordered as Gaussian, then Lorentzian. 
mu: float Location", "** 0.5 sqrt2pi = (2 * np.pi) ** 0.5 sqrt2log2t2 = 2 *", "if mu is not None else 0.0 self.amp = amp if amp is", "---- The formula used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied", "Lorentzian. mu: float Location of the center, defaults to 0. amp: float Amplitude", "and :math:`\\gamma`: .. math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self,", "fwhm is not None else np.abs(1.0) self.mu = mu if mu is not", "= s * self.wH self.nL = self.rho * (1 + (1 - self.rho)", "kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "Gaussian and Lorentzian width. mu: float Location of the center, defaults to 0.", "* self.fwhmL ** 2 + self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma =", "= 0.5346 * self.fwhmL + \\ (0.2166 * self.fwhmL ** 2 + self.fwhmG", "= (1.0 / np.cosh(x / self.gamma)) ** 2 simplePart = 2 * self.gamma", "= (1 - self.nL - self.nI - self.nH) * self.G(0) Lorentz = self.nL", "Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs =", "* (1 - self.rho) * self.nH self.G = Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL,", "def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.sigma =", "Gaussian profile in the arguments supplied. Note ---- The used formula is taken", "return super(Lorentzian, self).__call__(topPart / bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt profile. 
Parameters", "Lorentz + Irrat + Hyper self._normFactor = val def __call__(self, x): x =", "(1.0 - self.n) * self.G(x) return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized", "mu: {}, amp: {}' s = s.format(self.fwhm, self.mu, self.amp) return s def __call__(self,", "z = (x + 1j * self.gamma) / (self.sigma * sqrt2) top =", "thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM &=", "-0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d =", "** 3) * (self.fwhmL ** 2) + 4.47163 * (self.fwhmG ** 2) *", "callable normalized Irrational profile. Parameters ---------- fwhm: float Full Width At Half Maximum,", "+ self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma = seperate / self._fwhmNorm else:", "self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL", "- self.mu val = self.n * self.L(x) + (1.0 - self.n) * self.G(x)", "in the arguments supplied. Note ---- Formula taken from <NAME> et al. 
:cite:`Ida2000`,", "x - self.mu val = self.n * self.L(x) + (1.0 - self.n) *", "\\eta \\mathcal{L} (x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None,", "self.gamma) ** 2) ** (-1.5)) / (2 * self.gamma) return super(Irrational, self).__call__(val) class", "np.polyval(c, self.rho) self.wH = np.polyval(d, self.rho) self.nL = np.polyval(f, self.rho) self.nI = np.polyval(g,", "__call__(self, vals): if self.ampIsArea: factor = 1.0 else: factor = self._normFactor vals =", "@n.setter def n(self, value): value = np.abs(value) if value > 1: value =", "* np.pi) ** 0.5 sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2)) base_e =", "Attributes ---------- totalfwhm: float Approximation of the width based on the underlying widths.", "(0.2166 * self.fwhmL ** 2 + self.fwhmG ** 2) ** 0.5 self.sigma, self.gamma", "+ 1)) if not self.ampIsArea: self._normFactor = 1.0 / (2 * self.gamma) def", "n(self): return self._n @n.setter def n(self, value): value = np.abs(value) if value >", "if amp is not None else 1.0 def __repr__(self): s = str(type(self)) +", "Callable instance, evaluates the hyperbolic profile in the arguments supplied. Note ---- The", "Formula taken from <NAME> et al. :cite:`Ida2000`, code inspired by the PhD thesis", "defaults to 1. Returns ------- Gaussian Callable instance, evaluates the Gaussian profile in", "= Gaussian(fwhm=self.wG, **self.kwargs) self.L = Lorentzian(fwhm=self.wL, **self.kwargs) self.I = Irrational(fwhm=self.wI, **self.kwargs) self.H =", "to 1. Attributes ---------- totalfwhm: float Approximation of the width based on the", "evaluates the Voigt profile in the arguments supplied. 
Note ---- The formula used", "/ (self.gamma * np.pi) def __call__(self, x): x = x - self.mu topPart", "np.polyval(f, self.rho) self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG = s", "np.sqrt(2 * np.log(2)) base_e = np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None,", "seperate G, L = seperate self._fwhm = 0.5346 * self.fwhmL + \\ (0.2166", "is not None else 0.5 if self._n > 1: self._n = self._n -", "Returns ------- Hyperbolic Callable instance, evaluates the hyperbolic profile in the arguments supplied.", "inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. This class uses a weighted", "def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)):", "= self.fwhm / np.sqrt(np.power(2, 2.0 / 3) - 1) if not self.ampIsArea: self._normFactor", "= (1.0 ** (-1.5)) / (2 * self.gamma) def __call__(self, x): x =", "** 2) ** 0.5 self.sigma, self.gamma = seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL", ":math:`\\gamma`: .. math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None,", "and HyperbolicSquared profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt,", "def __call__(self, x): x = x - self.mu z = (x + 1j", "sqrt2pi) ** (-1) def __call__(self, x): x = x - self.mu expPart =", "self._fwhm = 0.6144031129489123 * value self.setParams() def setParams(self): a = np.array( [-2.95553, 8.48252,", "1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f", "57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622,", "and Lorentzian width. mu: float Location of the center, defaults to 0. amp:", "used formula is taken from T. Ida et al. 
:cite:`Ida2000`, code inspired by", "from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function, and the values", "z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm,", "defaults to 0. amp: float Amplitude of the profile, defaults to 1. Returns", "+ Hyper self._normFactor = val def __call__(self, x): x = x - self.mu", "Hyperbolic Callable instance, evaluates the hyperbolic profile in the arguments supplied. Note ----", "to 1. Ordered as Gaussian, then Lorentzian. mu: float Location of the center,", "1. Returns ------- Hyperbolic Callable instance, evaluates the hyperbolic profile in the arguments", "creating callables for easy and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor::", "widths. Returns ------- Voigt Callable instance, evaluates the Voigt profile in the arguments", "self.nH) * self.G(0) Lorentz = self.nL * self.L(0) Irrat = self.nI * self.I(0)", "the center, defaults to 0. amp: float Amplitude of the profile, defaults to", "x): x = x - self.mu topPart = self.gamma bottomPart = (x **", "the profile, defaults to 1. Returns ------- Gaussian Callable instance, evaluates the Gaussian", "x = x - self.mu coshPart = (1.0 / np.cosh(x / self.gamma)) **", "supplied. Note ---- The formula used is taken from the webpage http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and", "- self.nI - self.nH) * self.G(x) Lorentz = self.nL * self.L(x) Irrat =", "vals class Gaussian(Profile): r\"\"\"A callable normalized Gaussian profile. Parameters ---------- fwhm: float Full", "None else 0.0 self.amp = amp if amp is not None else 1.0", "of the profile, defaults to 1. Returns ------- PseudoVoigt Callable instance, evaluates the", "* self.nI self.nH = self.rho * (1 - self.rho) * self.nH self.G =", "the width based on the underlying widths. 
Returns ------- Voigt Callable instance, evaluates", "not self.ampIsArea: self._normFactor = self.n * self.L(0) self._normFactor += (1.0 - self.n) *", "self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 * value self.sigma, self.gamma =", "np.polyval(h, self.rho) self.wG = s * (1 - self.rho * self.wG) self.wL =", "sqrt2pi) return super(Voigt, self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational profile. Parameters ----------", "self.gamma = 0.5 * self.fwhm if not self.ampIsArea: self._normFactor = 1.0 / (self.gamma", "self.nI * self.I(x) Hyper = self.nH * self.H(x) val = Gauss + Lorentz", "- self.nL - self.nI - self.nH) * self.G(0) Lorentz = self.nL * self.L(0)", "self.fwhm / (2 * np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor = 1.0", "1. Returns ------- Gaussian Callable instance, evaluates the Gaussian profile in the arguments", "= kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", "amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self):", "+ Irrat + Hyper self._normFactor = val def __call__(self, x): x = x", "At Half Maximum, defaults to 1, ordered as Gaussian and Lorentzian width. mu:", "Lorentzian profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to", "np.sqrt(np.power(2, 2.0 / 3) - 1) if not self.ampIsArea: self._normFactor = (1.0 **", "> 1: value = value - int(value) self._n = value if not self.ampIsArea:", ".. moduleauthor:: <NAME> <<EMAIL>> .. moduleauthor:: <NAME> <<EMAIL>> \"\"\" import numpy as np", "bottomPart) class Voigt(Profile): r\"\"\"A callable normalized Voigt profile. 
Parameters ---------- fwhm: list of", "value - int(value) self._n = value if not self.ampIsArea: self._normFactor = self.n *", "+ (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L", "def fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL", "* (self.fwhmL ** 4) + self.fwhmL ** 5 ) ** (1.0 / 5)", "Voigt profile in the arguments supplied. Note ---- The formula used is taken", "width, based on the underlying widths. Returns ------- ExtendedVoigt Callable instance, evaluates the", "+ self.fwhmL ** 5 ) ** (1.0 / 5) if not self.ampIsArea: Gauss", "tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate self._fwhm = 0.5346 *", "self.fwhmL = seperate self._fwhm = 0.5346 * self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL", "= np.array( [9.76947, -24.12407, 22.10544, -11.09215, 3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142,", "-9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b = np.array( [3.19974, -16.50453, 29.14158, -23.45651, 10.30003,", "HyperbolicSquared profile. 
Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to", "self.fwhmG ** 2) else: self.fwhmG, self.fwhmL = value, value self._fwhm = 0.6144031129489123 *", "+= (1.0 - self.n) * self.G(0) @property def n(self): return self._n @n.setter def", "self._fwhm = value self.L.fwhm = value self.G.fwhm = value if not self.ampIsArea: self._normFactor", "self.G = Gaussian(**kwargs) self._n = np.abs(eta) if eta is not None else 0.5", "self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta) if eta is not", "(-1) def __call__(self, x): x = x - self.mu expPart = np.exp(-0.5 *", "= x - self.mu val = self.n * self.L(x) + (1.0 - self.n)", "(x; \\gamma, \\mu) + (1-\\eta) G(x; \\sigma, \\mu)\"\"\" def __init__(self, eta=None, fwhm=None, mu=None,", "super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile. Parameters ---------- fwhm:", "super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "47.06071, -15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466, 2.76622, -0.68688, -0.47745,", "lineshapes, creating callables for easy and intuitive calculations. .. moduleauthor:: <NAME> <<EMAIL>> ..", "Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &=", "the extended Voigt profile in the arguments supplied. Note ---- Formula taken from", "of the width based on the underlying widths. Returns ------- Voigt Callable instance,", "self.gamma) return super(Irrational, self).__call__(val) class HyperbolicSquared(Profile): r\"\"\"A callable normalized HyperbolicSquared profile. Parameters ----------", "PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. 
Parameters ---------- fwhm: float Full Width At", "/ (self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) self._normFactor =", "(self.fwhmG ** 4) * self.fwhmL + 2.42843 * (self.fwhmG ** 3) * (self.fwhmL", "= 2 ** 0.5 sqrt2pi = (2 * np.pi) ** 0.5 sqrt2log2t2 =", "s = s.format(self.fwhm, self.mu, self.amp) return s def __call__(self, vals): if self.ampIsArea: factor", "FWHM are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: .. math:: V\\left(x;\\mu, \\sigma, \\gamma\\right)", "r\"\"\"A callable normalized extended Voigt profile. Parameters ---------- fwhm: list of 2 floats", "floats Full Width At Half Maximum of the components, defaults to 1. Ordered", "* self.fwhmL ** 2 + self.fwhmG ** 2) else: self.fwhmG, self.fwhmL = value,", "__call__(self, x): x = x - self.mu expPart = np.exp(-0.5 * (x /", "(sqrt2log2t2) if not self.ampIsArea: self._normFactor = (self.sigma * sqrt2pi) ** (-1) def __call__(self,", "Amplitude of the profile, defaults to 1. Returns ------- Gaussian Callable instance, evaluates", "to 0. amp: float Amplitude of the profile, defaults to 1. Attributes ----------", "the Faddeeva function, and the values supplied as FWHM are appropriately transformed to", "def __repr__(self): s = str(type(self)) + 'FWHM: {}, mu: {}, amp: {}' s", "__init__(self, fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "the values supplied as FWHM are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: ..", "the profile, defaults to 1. Returns ------- Hyperbolic Callable instance, evaluates the hyperbolic", "+ 'FWHM: {}, mu: {}, amp: {}' s = s.format(self.fwhm, self.mu, self.amp) return", "value self.L.fwhm = value self.G.fwhm = value if not self.ampIsArea: self._normFactor = self.n", "amp is not None else 1.0 def __repr__(self): s = str(type(self)) + 'FWHM:", "evaluates the extended Voigt profile in the arguments supplied. 
Note ---- Formula taken", "underlying widths. Returns ------- Voigt Callable instance, evaluates the Voigt profile in the", "= np.polyval(a, self.rho) self.wL = np.polyval(b, self.rho) self.wI = np.polyval(c, self.rho) self.wH =", "val def __call__(self, x): x = x - self.mu Gauss = (1 -", "\\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None,", "Callable instance, evaluates the irrational profile in the arguments supplied. Note ---- The", "Gaussian profile. Parameters ---------- fwhm: float Full Width At Half Maximum, defaults to", "- self.n) * self.G(0) def __call__(self, x): x = x - self.mu val", "(1 - (1 - self.rho) * self.wL) self.wI = s * self.wI self.wH", "hyperbolic profile in the arguments supplied. Note ---- The used formula is taken", "int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def", "self.rho) * self.nI self.nH = self.rho * (1 - self.rho) * self.nH self.G", "self.H(0) val = Gauss + Lorentz + Irrat + Hyper self._normFactor = val", "self.L.fwhm = value self.G.fwhm = value if not self.ampIsArea: self._normFactor = self.n *", "http://mathworld.wolfram.com/GaussianFunction.html: .. 
math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self,", "= np.exp(-0.5 * (x / self.sigma) ** 2) normPart = self.sigma * sqrt2pi", "V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs):", "self.fwhmL + 2.42843 * (self.fwhmG ** 3) * (self.fwhmL ** 2) + 4.47163", "seperate self._fwhm = 0.5346 * self.fwhmL + \\ (0.2166 * self.fwhmL ** 2", "fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / np.sqrt(np.power(2, 2.0 / 3)", "self._n - int(self._n) super(PseudoVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): if", "(1 - self.rho) * self.nL) self.nI = self.rho * (1 - self.rho) *", "Windows :synopsis: Implementation of classes for different lineshapes, creating callables for easy and", "class Lorentzian(Profile): r\"\"\"A callable normalized Lorentzian profile. Parameters ---------- fwhm: float Full Width", "= 1.0 / (self.gamma * np.pi) def __call__(self, x): x = x -", "self.G(0) Lorentz = self.nL * self.L(0) Irrat = self.nI * self.I(0) Hyper =", "totalfwhm: float Approximation of the width based on the underlying widths. Returns -------", "evaluates the Lorentzian profile in the arguments supplied. Note ---- The formula used", "3.23653, -0.14107, 0.25437]) h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579])", "8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b = np.array( [3.19974, -16.50453, 29.14158, -23.45651,", "based on the underlying widths. 
Returns ------- Voigt Callable instance, evaluates the Voigt", "by the PhD thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g}", "extended Voigt profile in the arguments supplied. Note ---- Formula taken from <NAME>", "else: factor = self._normFactor vals = vals / factor return self.amp * vals", "self.gamma = seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value self._fwhm =", "code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right)", "np.array( [-2.95553, 8.48252, -9.48291, 4.74052, -1.24984, 0.15021, 0.66]) b = np.array( [3.19974, -16.50453,", "At Half Maximum, defaults to 1. mu: float Location of the center, defaults", "value self._fwhm = 0.6144031129489123 * value self.setParams() def setParams(self): a = np.array( [-2.95553,", "return super(PseudoVoigt, self).__call__(val) class ExtendedVoigt(Profile): r\"\"\"A callable normalized extended Voigt profile. Parameters ----------", "* sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top) class", "** 5 ) ** (1.0 / 5) if not self.ampIsArea: Gauss = (1", "underlying widths. Returns ------- ExtendedVoigt Callable instance, evaluates the extended Voigt profile in", "FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp,", "---------- fwhm: list of 2 floats Full Width At Half Maximum, defaults to", "-16.50453, 29.14158, -23.45651, 10.30003, -1.25693, -0.42179]) c = np.array( [-17.80614, 57.92559, -73.61822, 47.06071,", "factor = 1.0 else: factor = self._normFactor vals = vals / factor return", "al. :cite:`Ida2000`, code inspired by the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. 
..", "self._normFactor vals = vals / factor return self.amp * vals class Gaussian(Profile): r\"\"\"A", "+ (1 - self.rho) * self.nL) self.nI = self.rho * (1 - self.rho)", "Parameters ---------- fwhm: list of 2 floats Full Width At Half Maximum, defaults", "Note ---- The used formula is taken from the MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: ..", "value): if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2] self.fwhmG, self.fwhmL = seperate", "fwhm=None, mu=None, amp=None, **kwargs): self._fwhmNorm = np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "= value - int(value) self._n = value if not self.ampIsArea: self._normFactor = self.n", "fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)): seperate", "self.mu val = self.n * self.L(x) + (1.0 - self.n) * self.G(x) return", "self.nI * self.I(0) Hyper = self.nH * self.H(0) val = Gauss + Lorentz", "self.L(0) Irrat = self.nI * self.I(0) Hyper = self.nH * self.H(0) val =", "- int(value) self._n = value if not self.ampIsArea: self._normFactor = self.n * self.L(0)", "r\"\"\"A callable normalized Lorentzian profile. Parameters ---------- fwhm: float Full Width At Half", "defaults to 1. mu: float Location of the center, defaults to 0. 
amp:", "self.gamma) def __call__(self, x): x = x - self.mu val = ((1.0 +", "** 4) + self.fwhmL ** 5 ) ** (1.0 / 5) if not", "* self.G(0) def __call__(self, x): x = x - self.mu val = self.n", "-4.55466, 2.76622, -0.68688, -0.47745, 1.10186]) f = np.array( [3.7029, -21.18862, 34.96491, -24.10743, 9.3155,", "not self.ampIsArea: z = (0 + 1j * self.gamma) / (self.sigma * sqrt2)", "if not self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) / (2 * self.gamma) def", "__init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs)", "http://en.wikipedia.org/wiki/Voigt_profile#Pseudo-Voigt_Approximation, and the supplied FWHM is appropriately transformed for the Gaussian and Lorentzian", "value > 1: value = value - int(value) self._n = value if not", "x): x = x - self.mu coshPart = (1.0 / np.cosh(x / self.gamma))", "self.nI = np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG = s * (1", "fwhm: float Full Width At Half Maximum, defaults to 1. mu: float Location", "np.array([sqrt2log2t2, 2]) super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter", "3) - 1) if not self.ampIsArea: self._normFactor = (1.0 ** (-1.5)) / (2", "* self.I(x) Hyper = self.nH * self.H(x) val = Gauss + Lorentz +", "* self.gamma) / (self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi)", "x = x - self.mu val = self.n * self.L(x) + (1.0 -", "** 2) + 4.47163 * (self.fwhmG ** 2) * (self.fwhmL ** 3) +", "if self.ampIsArea: factor = 1.0 else: factor = self._normFactor vals = vals /", "PseudoVoigt Callable instance, evaluates the pseudovoigt profile in the arguments supplied. Note ----", "value = value - int(value) self._n = value if not self.ampIsArea: self._normFactor =", "al. :cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. This class", "float Amplitude of the profile, defaults to 1. 
Returns ------- Hyperbolic Callable instance,", "np.polyval(g, self.rho) self.nH = np.polyval(h, self.rho) self.wG = s * (1 - self.rho", "used is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile, with :math:`w(z)` the Faddeeva function,", "((1.0 + (x / self.gamma) ** 2) ** (-1.5)) / (2 * self.gamma)", "self.sigma, self.gamma = self._fwhm / self._fwhmNorm if not self.ampIsArea: z = (0 +", "def __init__(self, eta=None, fwhm=None, mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs)", "h = np.array( [-10.02142, 32.83023, -39.71134, 23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL", "fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return", "x = x - self.mu topPart = self.gamma bottomPart = (x ** 2", "of the center, defaults to 0. amp: float Amplitude of the profile, defaults", ".. math:: V\\left(x;\\mu, \\sigma, \\gamma\\right) &= \\frac{\\Re\\left[w\\left(z\\right) \\right]}{\\sigma\\sqrt{2\\pi}} z&=\\frac{x+i\\gamma}{\\sigma\\sqrt{2\\pi}}\"\"\" def __init__(self, fwhm=None, mu=None,", "- self.rho) * self.nI self.nH = self.rho * (1 - self.rho) * self.nH", "profiles.\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): self.kwargs = kwargs super(ExtendedVoigt, self).__init__(fwhm=fwhm, mu=mu,", "value): self._fwhm = value self.L.fwhm = value self.G.fwhm = value if not self.ampIsArea:", "def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.L.fwhm =", "Returns ------- Irrational Callable instance, evaluates the irrational profile in the arguments supplied.", "x = x - self.mu z = (x + 1j * self.gamma) /", "2 floats Full Width At Half Maximum, defaults to 1, ordered as Gaussian", "webpage http://mathworld.wolfram.com/LorentzianFunction.html: .. 
math:: \\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\"", "float Amplitude of the profile, defaults to 1. Returns ------- Irrational Callable instance,", "supplied. Note ---- The formula used is taken from the Wikipedia webpage http://en.wikipedia.org/wiki/Voigt_profile,", "self).__call__(top) class Irrational(Profile): r\"\"\"A callable normalized Irrational profile. Parameters ---------- fwhm: float Full", "the PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &=", "-9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL + self.fwhmG) self.wG = np.polyval(a,", "/ self.gamma) ** 2) ** (-1.5)) / (2 * self.gamma) return super(Irrational, self).__call__(val)", "mu=None, amp=None, **kwargs): self.L = Lorentzian(**kwargs) self.G = Gaussian(**kwargs) self._n = np.abs(eta) if", "np.pi) def __call__(self, x): x = x - self.mu topPart = self.gamma bottomPart", "(self.sigma * sqrt2pi) ** (-1) def __call__(self, x): x = x - self.mu", "return super(HyperbolicSquared, self).__call__(coshPart / simplePart) class PseudoVoigt(Profile): r\"\"\"A callable normalized PseudoVoigt profile. Parameters", "\\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Gaussian, self).__init__(fwhm=fwhm,", "for different lineshapes, creating callables for easy and intuitive calculations. .. 
moduleauthor:: <NAME>", "* sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) self._normFactor = top def", "a weighted sum of the Gaussian, Lorentzian, Irrational and HyperbolicSquared profiles.\"\"\" def __init__(self,", "Gauss + Lorentz + Irrat + Hyper self._normFactor = val def __call__(self, x):", "if fwhm is not None else np.abs(1.0) self.mu = mu if mu is", "def fwhm(self, value): self._fwhm = value self.gamma = self.fwhm / (2 * np.log(np.sqrt(2)", "\\mathcal{L}\\left(x; \\mu, \\gamma\\right) &= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None,", "(self.sigma * sqrt2) top = wofz(z).real / (self.sigma * sqrt2pi) return super(Voigt, self).__call__(top)", "self.wG) self.wL = s * (1 - (1 - self.rho) * self.wL) self.wI", "* self.G(0) @property def n(self): return self._n @n.setter def n(self, value): value =", "the profile, defaults to 1. Attributes ---------- totalfwhm: float Approximation of the total", "taken from <NAME> et al. :cite:`Ida2000`, code inspired by the PhD thesis of", "np.abs(1.0) self.mu = mu if mu is not None else 0.0 self.amp =", "---- The used formula is taken from T. Ida et al. :cite:`Ida2000`, code", "self.G(x) Lorentz = self.nL * self.L(x) Irrat = self.nI * self.I(x) Hyper =", "super(Voigt, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm @fwhm.setter def fwhm(self,", "0.0 self.amp = amp if amp is not None else 1.0 def __repr__(self):", "s * (1 - (1 - self.rho) * self.wL) self.wI = s *", "np.log(2)) base_e = np.exp(1) class Profile(object): def __init__(self, fwhm=None, mu=None, amp=None, ampIsArea=False): super(Profile,", "---- Formula taken from <NAME> et al. :cite:`Ida2000`, code inspired by the PhD", "/ (2 * np.log(np.sqrt(2) + 1)) if not self.ampIsArea: self._normFactor = 1.0 /", "instance, evaluates the pseudovoigt profile in the arguments supplied. 
Note ---- The formula", "amp: float Amplitude of the profile, defaults to 1. Returns ------- Gaussian Callable", "else np.abs(1.0) self.mu = mu if mu is not None else 0.0 self.amp", "4) * self.fwhmL + 2.42843 * (self.fwhmG ** 3) * (self.fwhmL ** 2)", "self.nI - self.nH) * self.G(x) Lorentz = self.nL * self.L(x) Irrat = self.nI", "/ 5) if not self.ampIsArea: Gauss = (1 - self.nL - self.nI -", "self._normFactor = (self.sigma * sqrt2pi) ** (-1) def __call__(self, x): x = x", "PhD thesis of Deyan Yordanov :cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x-", "** (-1) def __call__(self, x): x = x - self.mu expPart = np.exp(-0.5", "0.5 self.sigma, self.gamma = seperate / self._fwhmNorm else: self.fwhmG, self.fwhmL = value, value", "defaults to 1. Returns ------- Hyperbolic Callable instance, evaluates the hyperbolic profile in", "* (self.fwhmG ** 2) * (self.fwhmL ** 3) + 0.07842 * self.fwhmG *", ":cite:`Yordanov2007`. .. math:: \\mathcal{I}\\left(x; \\mu, g\\right) &= \\frac{g}{2}\\left[1+\\left(\\frac{x- \\mu}{g}\\right)^2\\right]^{-3/2} FWHM &= \\sqrt{2^{2/3}-1}g\"\"\" def", "if value > 1: value = value - int(value) self._n = value if", "self.wH self.nL = self.rho * (1 + (1 - self.rho) * self.nL) self.nI", "Maximum, defaults to 1. mu: float Location of the center, defaults to 0.", "self._fwhm @fwhm.setter def fwhm(self, value): if isinstance(value, (list, tuple, np.ndarray)): seperate = value[0:2]", "the Gaussian and Lorentzian lineshapes: .. math:: \\mathcal{V}\\left(x; \\mu, \\eta, \\sigma, \\gamma\\right) =", "+ 0.07842 * self.fwhmG * (self.fwhmL ** 4) + self.fwhmL ** 5 )", "PhD thesis of <NAME> :cite:`Yordanov2007`. .. math:: H\\left(x;\\mu, g\\right) &= \\frac{1}{2g}\\cosh^{-2}\\left(\\frac{x-\\mu}{g} \\right) FWHM", "s * (1 - self.rho * self.wG) self.wL = s * (1 -", "[-17.80614, 57.92559, -73.61822, 47.06071, -15.36331, 1.43021, 1.19913]) d = np.array( [-1.26571, 4.05475, -4.55466,", "arguments supplied. 
Note ---- Formula taken from <NAME> et al. :cite:`Ida2000`, code inspired", "Note ---- The formula used is taken from the MathWorld webpage http://mathworld.wolfram.com/LorentzianFunction.html: ..", "def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def", "mu=None, amp=None, **kwargs): super(Irrational, self).__init__(fwhm=fwhm, mu=mu, amp=amp, **kwargs) @property def fwhm(self): return self._fwhm", "&= \\frac{\\gamma} {\\pi\\left(\\left(x-\\mu\\right)^2+\\gamma^2\\right)} FWHM &= 2\\gamma\"\"\" def __init__(self, fwhm=None, mu=None, amp=None, **kwargs): super(Lorentzian,", "self._normFactor = 1.0 / (self.gamma * np.pi) def __call__(self, x): x = x", "self.L(x) Irrat = self.nI * self.I(x) Hyper = self.nH * self.H(x) val =", "------- Irrational Callable instance, evaluates the irrational profile in the arguments supplied. Note", "MathWorld webpage http://mathworld.wolfram.com/GaussianFunction.html: .. math:: G(x;\\mu, \\sigma) &= \\frac{\\exp\\left(-\\frac{1}{2}\\left(\\frac{x-\\mu} {\\sigma}\\right)^2\\right)}{\\sqrt{2\\pi}\\sigma} FWHM &= s\\sqrt{2\\ln\\left(2\\right)}\\sigma\"\"\"", "self._normFactor += (1.0 - self.n) * self.G(0) @property def n(self): return self._n @n.setter", "G, L = seperate self._fwhm = 0.5346 * self.fwhmL + \\ (0.2166 *", "self.mu expPart = np.exp(-0.5 * (x / self.sigma) ** 2) normPart = self.sigma", "- self.mu expPart = np.exp(-0.5 * (x / self.sigma) ** 2) normPart =", "s.format(self.fwhm, self.mu, self.amp) return s def __call__(self, vals): if self.ampIsArea: factor = 1.0", "to 1. Returns ------- PseudoVoigt Callable instance, evaluates the pseudovoigt profile in the", "23.59717, -9.21815, 1.50429, 1.01579]) self.rho = self.fwhmL / (self.fwhmL + self.fwhmG) self.wG =", "self._fwhm = 0.5346 * self.fwhmL + \\ np.sqrt(0.2166 * self.fwhmL ** 2 +", "profile, defaults to 1. 
Returns ------- Lorentzian Callable instance, evaluates the Lorentzian profile", "= np.abs(value) if value > 1: value = value - int(value) self._n =", "supplied as FWHM are appropriately transformed to :math:`\\sigma` and :math:`\\gamma`: .. math:: V\\left(x;\\mu,", "instance, evaluates the extended Voigt profile in the arguments supplied. Note ---- Formula", "et al. :cite:`Ida2000`, code inspired by the PhD thesis of <NAME> :cite:`Yordanov2007`. ..", "def __call__(self, x): x = x - self.mu val = self.n * self.L(x)", "supplied. Note ---- The used formula is taken from T. Ida et al.", "Ordered as Gaussian, then Lorentzian. mu: float Location of the center, defaults to", "return self._fwhm @fwhm.setter def fwhm(self, value): self._fwhm = value self.sigma = self.fwhm /", "= x - self.mu Gauss = (1 - self.nL - self.nI - self.nH)", "['Gaussian', 'Lorentzian', 'Voigt', 'PseudoVoigt', 'ExtendedVoigt', 'Irrational', 'HyperbolicSquared'] sqrt2 = 2 ** 0.5 sqrt2pi", "/ (self.sigma * sqrt2pi) self._normFactor = top def __call__(self, x): x = x", "Returns ------- Lorentzian Callable instance, evaluates the Lorentzian profile in the arguments supplied." ]
[ "tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert", "return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key =", "tdmapsDBSchema) if data is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\":", "request.data ) data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link =", "id, tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath,", "request.host_url, tdmapid ) result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\":", "{0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id':", "'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result #####################################################", "result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id':", "sys #so that we can import globals sys.path.append('../..') from globals import * #backgroud", "return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global", "BLUEPRINT ##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name of module __name__, template_folder='templates' #", "import ntpath import numpy as np from matplotlib import pyplot as plt import", "None result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None:", "went wrong.\", \"url\": request.url, }, } return_code = 404 else: tdmap_link = 
\"{0}api/tdmaps/{1}\".format(", "tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id,", ") sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone()", "} }, } return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST'])", "tdmap_link } }, } return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods =", "None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT id ,", "jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global", "sys.path.append('../..') from globals import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath,", "= None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql", "len(request.data) > 0: data_dict = json.loads( request.data ) data = {} inserted_tdmap_id =", "= json.loads( request.data ) data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema)", "folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid):", "cur.fetchone() if result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id':", "methods = ['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema status =", ", exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id)", "\"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, 
\"error\" : { \"code\": 404,", "id ) VALUES( ? );''' id_key = None sqlite3_conn.execute( sql,[ id_key ] )", ": { \"code\": 404, \"message\": \"Something went wrong.\", \"url\": request.url, }, } return_code", "apiVersion global tdmapsDBPath global tdmapsDBSchema status = None data_dict = None if len(request.data)", "plt import sys #so that we can import globals sys.path.append('../..') from globals import", "tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into", "sys import ase.io.cif from ase import Atoms import zlib import sqlite3 import ntpath", ") data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format(", "get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None: result = { \"apiVersion\": apiVersion, \"params\":", "globals import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key", "None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None: result = {", "= ['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None", "ase import Atoms import zlib import sqlite3 import ntpath import numpy as np", ": tdmap_link } }, } return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods", "result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close()", "of module __name__, template_folder='templates' # templates folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html')", "else: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\"", "cur = sqlite3_conn.cursor() 
sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string:", "> 0: data_dict = json.loads( request.data ) data = {} inserted_tdmap_id = save_tdmaps_setup_data(None,", "= \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE", "'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT", "} return_code = 404 else: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\":", "json import sys import ase.io.cif from ase import Atoms import zlib import sqlite3", "simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary:", "result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result", "return_code = 404 else: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method,", ": inserted_tdmap_id } if status is None: result = { \"apiVersion\": apiVersion, \"params\":", "}, } return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def", "= None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT id", "can import globals sys.path.append('../..') from globals import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data(", "__name__, template_folder='templates' # templates folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods", "Atoms import zlib import sqlite3 import ntpath import numpy as np from matplotlib", "'cells_conf_id': result_binary[2], 
'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return", "job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath,", "= { 'id' : inserted_tdmap_id } if status is None: result = {", "result_binary = cur.fetchone() if result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id':", "data is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\":", "went wrong.\", \"url\": request.url, }, } return_code = 404 else: result = {", ") status = True data = { 'id' : inserted_tdmap_id } if status", "import sqlite3 import ntpath import numpy as np from matplotlib import pyplot as", "0: data_dict = json.loads( request.data ) data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict,", "\"took\": 0, \"data\" : data, \"links\" : { \"tdmap\" : { \"self\" :", "data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None: result = { \"apiVersion\":", "= get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None: result = { \"apiVersion\": apiVersion,", "wrong.\", \"url\": request.url, }, } return_code = 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url,", "inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT", "#name of module __name__, template_folder='templates' # templates folder ) @tdmaps.route('/') def index_tdmaps(): return", "import urllib.error import urllib.parse import os import json import sys import ase.io.cif from", "request.url, }, } return_code = 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid )", "import zlib import sqlite3 import ntpath import numpy as np from matplotlib import", "Blueprint( 'tdmaps', 
#name of module __name__, template_folder='templates' # templates folder ) @tdmaps.route('/') def", "\"links\" : { \"tdmap\" : { \"self\" : tdmap_link } }, } return_code", "tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary: result =", "sql = \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps", "404, \"message\": \"Something went wrong.\", \"url\": request.url, }, } return_code = 404 else:", "'id' : inserted_tdmap_id } if status is None: result = { \"apiVersion\": apiVersion,", "result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"error\" :", "? );''' id_key = None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur =", "{ \"tdmap\" : { \"self\" : tdmap_link } }, } return_code = 200", "import secure_filename import urllib.request import urllib.error import urllib.parse import os import json import", "'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT ##################### #####################################################", "inserted_tdmap_id ) status = True data = { 'id' : inserted_tdmap_id } if", "@tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema status", "data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url,", "id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string", "tdmaps = Blueprint( 'tdmaps', #name of module __name__, template_folder='templates' # templates folder )", "= {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, 
tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id", "\"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method,", "if status is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method,", "global tdmapsDBSchema status = None result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema)", "\"method\": request.method, \"took\": 0, \"error\" : { \"code\": 404, \"message\": \"Something went wrong.\",", "= result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result =", "else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = { \"apiVersion\": apiVersion, \"params\":", "import ase.io.cif from ase import Atoms import zlib import sqlite3 import ntpath import", "} if status is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\":", "{ \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\" : data, \"links\"", "tdmapsDBSchema) sql = '''insert into tdmaps ( id ) VALUES( ? );''' id_key", "tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True data = { 'id'", "status = True data = { 'id' : inserted_tdmap_id } if status is", "##################### BLUEPRINT ##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name of module __name__, template_folder='templates'", "VALUES( ? 
);''' id_key = None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur", "= {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary: result = { 'id': result_binary[0],", ": data, \"links\" : { \"tdmap\" : { \"self\" : tdmap_link } },", "}, } return_code = 404 else: result = { \"apiVersion\": apiVersion, \"params\": request.args,", "None data_dict = None if len(request.data) > 0: data_dict = json.loads( request.data )", "save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True", "ase.io.cif from ase import Atoms import zlib import sqlite3 import ntpath import numpy", "apiVersion global tdmapsDBPath global tdmapsDBSchema status = None result = None data =", "0, \"data\" : data, \"links\" : { \"tdmap\" : { \"self\" : tdmap_link", "= True data = { 'id' : inserted_tdmap_id } if status is None:", "'''insert into tdmaps ( id ) VALUES( ? );''' id_key = None sqlite3_conn.execute(", "numpy as np from matplotlib import pyplot as plt import sys #so that", "id_key = None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select", "from flask_jsonpify import jsonpify from werkzeug import secure_filename import urllib.request import urllib.error import", "= None result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is", "cur = sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id,", "id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql =", "jsonpify from werkzeug import secure_filename import urllib.request import urllib.error import urllib.parse import os", "templates folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) 
def", "tdmapsDBSchema ): result = None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur", "tdmapsDBPath global tdmapsDBSchema status = None result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath,", "json.loads( request.data ) data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link", "tdmapsDBPath, tdmapsDBSchema) if data is None: result = { \"apiVersion\": apiVersion, \"params\": request.args,", "return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath", "methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema status =", "create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id,", "\"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key = result_string[0] sqlite3_conn.close() return", "import Flask, render_template, request, redirect, Blueprint from flask_jsonpify import jsonpify from werkzeug import", "inserted_key = result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result", "\"self\" : tdmap_link } }, } return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup',", "def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None data_dict =", "status = None data_dict = None if len(request.data) > 0: data_dict = json.loads(", "Blueprint from flask_jsonpify import jsonpify from werkzeug import secure_filename import urllib.request import urllib.error", "= \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key = result_string[0] sqlite3_conn.close()", "= cur.fetchone() if result_string: inserted_key = 
result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id,", ": { \"tdmap\" : { \"self\" : tdmap_link } }, } return_code =", "'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] }", "\"Something went wrong.\", \"url\": request.url, }, } return_code = 404 else: tdmap_link =", "status is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\":", "= '''insert into tdmaps ( id ) VALUES( ? );''' id_key = None", "tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath)", "global tdmapsDBPath global tdmapsDBSchema status = None result = None data = get_tdmaps_data(tdmapid,", "): result = None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur =", "return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global", "import urllib.request import urllib.error import urllib.parse import os import json import sys import", "\"message\": \"Something went wrong.\", \"url\": request.url, }, } return_code = 404 else: tdmap_link", "Flask, render_template, request, redirect, Blueprint from flask_jsonpify import jsonpify from werkzeug import secure_filename", "flask import Flask, render_template, request, redirect, Blueprint from flask_jsonpify import jsonpify from werkzeug", "import numpy as np from matplotlib import pyplot as plt import sys #so", "if result_string: inserted_key = result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema", "'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': 
result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id':", "zlib import sqlite3 import ntpath import numpy as np from matplotlib import pyplot", "request.args, \"method\": request.method, \"took\": 0, \"error\" : { \"code\": 404, \"message\": \"Something went", "import sys import ase.io.cif from ase import Atoms import zlib import sqlite3 import", "# templates folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET'])", "None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql =", "= cur.fetchone() if result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2],", "dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if", "sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key", "( id ) VALUES( ? 
);''' id_key = None sqlite3_conn.execute( sql,[ id_key ]", "import jsonpify from werkzeug import secure_filename import urllib.request import urllib.error import urllib.parse import", "WHERE id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary: result = {", "404 else: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0,", "waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone()", "True data = { 'id' : inserted_tdmap_id } if status is None: result", "data_dict = json.loads( request.data ) data = {} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath,", ": { \"self\" : tdmap_link } }, } return_code = 200 return jsonpify(result),", "return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema", "cur.execute(sql) result_binary = cur.fetchone() if result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1],", "id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id =", "pyplot as plt import sys #so that we can import globals sys.path.append('../..') from", "\"tdmap\" : { \"self\" : tdmap_link } }, } return_code = 200 return", "cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key = result_string[0] sqlite3_conn.close() return inserted_key def", "result_string: inserted_key = result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ):", "urllib.error import urllib.parse import os import json import sys import ase.io.cif from ase", "#so that we can import globals sys.path.append('../..') from globals import * #backgroud save_cells_unitcells_data", "exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, 
simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql)", "['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None result", "from matplotlib import pyplot as plt import sys #so that we can import", "request.url, }, } return_code = 404 else: result = { \"apiVersion\": apiVersion, \"params\":", "result = None inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor()", "index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath", "def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None result =", "result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result = None", "] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string =", "result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None: result", "request.host_url, inserted_tdmap_id ) status = True data = { 'id' : inserted_tdmap_id }", "= None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select =", "tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True data =", "create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps ( id ) VALUES( ? 
);'''", "request, redirect, Blueprint from flask_jsonpify import jsonpify from werkzeug import secure_filename import urllib.request", "inserted_tdmap_id } if status is None: result = { \"apiVersion\": apiVersion, \"params\": request.args,", "None if len(request.data) > 0: data_dict = json.loads( request.data ) data = {}", "{ \"code\": 404, \"message\": \"Something went wrong.\", \"url\": request.url, }, } return_code =", "result ##################################################### ##################### BLUEPRINT ##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name of module", "'tdmaps', #name of module __name__, template_folder='templates' # templates folder ) @tdmaps.route('/') def index_tdmaps():", "sql = '''insert into tdmaps ( id ) VALUES( ? );''' id_key =", "* #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None", "render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema", "\"took\": 0, \"error\" : { \"code\": 404, \"message\": \"Something went wrong.\", \"url\": request.url,", "\"method\": request.method, \"took\": 0, \"data\" : data, \"links\" : { \"tdmap\" : {", "import urllib.parse import os import json import sys import ase.io.cif from ase import", "= sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id", "{ 'id' : inserted_tdmap_id } if status is None: result = { \"apiVersion\":", "\"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True data = { 'id' : inserted_tdmap_id", "##################################################### tdmaps = Blueprint( 'tdmaps', #name of module __name__, template_folder='templates' # templates folder", "we can import globals sys.path.append('../..') 
from globals import * #backgroud save_cells_unitcells_data job def", "inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps (", "0, \"error\" : { \"code\": 404, \"message\": \"Something went wrong.\", \"url\": request.url, },", "result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4],", "FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary: result", "{} inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id )", "= \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True data = { 'id' :", "404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = { \"apiVersion\": apiVersion,", "id = {0}\".format(id) cur.execute(sql) result_binary = cur.fetchone() if result_binary: result = { 'id':", "request.method, \"took\": 0, \"error\" : { \"code\": 404, \"message\": \"Something went wrong.\", \"url\":", "import Atoms import zlib import sqlite3 import ntpath import numpy as np from", "from ase import Atoms import zlib import sqlite3 import ntpath import numpy as", "request.args, \"method\": request.method, \"took\": 0, \"data\" : data, \"links\" : { \"tdmap\" :", "last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key = result_string[0] sqlite3_conn.close() return inserted_key", "sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key =", "result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\" :", "= 404 else: result = { \"apiVersion\": apiVersion, \"params\": request.args, 
\"method\": request.method, \"took\":", "\"error\" : { \"code\": 404, \"message\": \"Something went wrong.\", \"url\": request.url, }, }", "tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True data = {", "globals sys.path.append('../..') from globals import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json,", "os import json import sys import ase.io.cif from ase import Atoms import zlib", "tdmapsDBSchema status = None result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if", "sqlite3 import ntpath import numpy as np from matplotlib import pyplot as plt", "\"params\": request.args, \"method\": request.method, \"took\": 0, \"error\" : { \"code\": 404, \"message\": \"Something", "is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0,", "inserted_key def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key = None", "= None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps ( id", "np from matplotlib import pyplot as plt import sys #so that we can", ") result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\"", "api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None data_dict = None", "= sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key", "werkzeug import secure_filename import urllib.request import urllib.error import urllib.parse import os import json", "None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"error\"", "} sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT ##################### 
##################################################### tdmaps = Blueprint( 'tdmaps',", "into tdmaps ( id ) VALUES( ? );''' id_key = None sqlite3_conn.execute( sql,[", "result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT ##################### ##################################################### tdmaps", "tdmapsDBPath global tdmapsDBSchema status = None data_dict = None if len(request.data) > 0:", "save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql", "def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global", "= { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\" : data,", "sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM", "result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result ##################################################### #####################", "apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"error\" : { \"code\": 404, \"message\":", "= ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None", ") VALUES( ? 
);''' id_key = None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit()", "= { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"error\" : {", "'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT ##################### ##################################################### tdmaps =", "ntpath import numpy as np from matplotlib import pyplot as plt import sys", "template_folder='templates' # templates folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods =", "cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary", "global tdmapsDBSchema status = None data_dict = None if len(request.data) > 0: data_dict", "None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps ( id )", "{ 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5],", "{ \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"error\" : { \"code\":", "\"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\" : data, \"links\" :", "\"message\": \"Something went wrong.\", \"url\": request.url, }, } return_code = 404 else: result", "\"url\": request.url, }, } return_code = 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid", "cur.fetchone() if result_string: inserted_key = result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data( id, tdmapsDBPath,", "tdmapsDBSchema status = None data_dict = None if len(request.data) > 0: data_dict =", "flask_jsonpify import 
jsonpify from werkzeug import secure_filename import urllib.request import urllib.error import urllib.parse", "} return_code = 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup():", "slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id = {0}\".format(id) cur.execute(sql) result_binary =", "['POST']) def api_tdmaps_setup(): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None data_dict", "sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\"", "sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if", "= create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id, cells_conf_id,", "import json import sys import ase.io.cif from ase import Atoms import zlib import", "render_template, request, redirect, Blueprint from flask_jsonpify import jsonpify from werkzeug import secure_filename import", "inserted_tdmap_id = save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status", "= save_tdmaps_setup_data(None, data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status =", "= \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\":", "sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps ( id ) VALUES(", "None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT", "matplotlib import pyplot as plt import sys #so that we can import globals", "return result ##################################################### 
##################### BLUEPRINT ##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name of", "import pyplot as plt import sys #so that we can import globals sys.path.append('../..')", "}, } return_code = 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result", "global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None result = None data", "global tdmapsDBPath global tdmapsDBSchema status = None data_dict = None if len(request.data) >", "tdmaps ( id ) VALUES( ? );''' id_key = None sqlite3_conn.execute( sql,[ id_key", "@tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion", "import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key =", "global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None data_dict = None if", "def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema)", "data_dict, tdmapsDBPath, tdmapsDBSchema) tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, inserted_tdmap_id ) status = True data", "api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema status = None result = None", "import os import json import sys import ase.io.cif from ase import Atoms import", "status = None result = None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data", ") @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global", "data, \"links\" : { \"tdmap\" : { \"self\" : tdmap_link } }, }", "if len(request.data) > 0: data_dict = json.loads( request.data ) data = {} 
inserted_tdmap_id", "apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0, \"data\" : data, \"links\" : {", "result_string = cur.fetchone() if result_string: inserted_key = result_string[0] sqlite3_conn.close() return inserted_key def get_tdmaps_data(", "##################################################### ##################### BLUEPRINT ##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name of module __name__,", "result_binary[6] } sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT ##################### ##################################################### tdmaps = Blueprint(", "tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = { \"apiVersion\": apiVersion, \"params\": request.args,", "= 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = { \"apiVersion\":", "result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6] } sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT #####################", "= create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps ( id ) VALUES( ?", "data = { 'id' : inserted_tdmap_id } if status is None: result =", "= 200 return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global apiVersion", "request.method, \"took\": 0, \"data\" : data, \"links\" : { \"tdmap\" : { \"self\"", "tdmapid ) result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method, \"took\": 0,", "from globals import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema):", "result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': 
result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id': result_binary[5], 'simgrids_conf_id': result_binary[6]", "return_code = 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result = {", "= Blueprint( 'tdmaps', #name of module __name__, template_folder='templates' # templates folder ) @tdmaps.route('/')", "data_dict = None if len(request.data) > 0: data_dict = json.loads( request.data ) data", "from flask import Flask, render_template, request, redirect, Blueprint from flask_jsonpify import jsonpify from", "import sys #so that we can import globals sys.path.append('../..') from globals import *", "sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBPath) cur = sqlite3_conn.cursor() sql = \"SELECT id , exp_setup_conf_id,", "200 return jsonpify(result), return_code @tdmaps.route('/setup', methods = ['POST']) def api_tdmaps_setup(): global apiVersion global", "module __name__, template_folder='templates' # templates folder ) @tdmaps.route('/') def index_tdmaps(): return render_template('index_tdmaps.html') @tdmaps.route('/<string:tdmapid>',", "redirect, Blueprint from flask_jsonpify import jsonpify from werkzeug import secure_filename import urllib.request import", "urllib.parse import os import json import sys import ase.io.cif from ase import Atoms", "save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn =", "= None if len(request.data) > 0: data_dict = json.loads( request.data ) data =", "from werkzeug import secure_filename import urllib.request import urllib.error import urllib.parse import os import", "#backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id, tdmap_setup_json, tdmapsDBPath, tdmapsDBSchema): inserted_key = None sqlite3_conn", ");''' id_key = None sqlite3_conn.execute( sql,[ id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor()", "as np from matplotlib import pyplot as plt import sys #so 
that we", "sql,[ id_key ] ) sqlite3_conn.commit() cur = sqlite3_conn.cursor() sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select)", "\"params\": request.args, \"method\": request.method, \"took\": 0, \"data\" : data, \"links\" : { \"tdmap\"", "secure_filename import urllib.request import urllib.error import urllib.parse import os import json import sys", "\"Something went wrong.\", \"url\": request.url, }, } return_code = 404 else: result =", "\"url\": request.url, }, } return_code = 404 else: result = { \"apiVersion\": apiVersion,", "sql_select = \"SELECT last_insert_rowid();\" cur.execute(sql_select) result_string = cur.fetchone() if result_string: inserted_key = result_string[0]", "wrong.\", \"url\": request.url, }, } return_code = 404 else: result = { \"apiVersion\":", "if result_binary: result = { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3],", "\"code\": 404, \"message\": \"Something went wrong.\", \"url\": request.url, }, } return_code = 404", "@tdmaps.route('/<string:tdmapid>', methods = ['GET']) def api_tdmaps_get(tdmapid): global apiVersion global tdmapsDBPath global tdmapsDBSchema status", "\"SELECT id , exp_setup_conf_id, cells_conf_id, slices_conf_id, waves_conf_id, dats_conf_id, simgrids_conf_id FROM tdmaps WHERE id", "def get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key = None sqlite3_conn", "= { 'id': result_binary[0], 'exp_setup_conf_id': result_binary[1], 'cells_conf_id': result_binary[2], 'slices_conf_id': result_binary[3], 'waves_conf_id': result_binary[4], 'dats_conf_id':", "import globals sys.path.append('../..') from globals import * #backgroud save_cells_unitcells_data job def save_tdmaps_setup_data( id,", "if data is None: result = { \"apiVersion\": apiVersion, \"params\": request.args, \"method\": request.method,", "= None data = get_tdmaps_data(tdmapid, tdmapsDBPath, tdmapsDBSchema) if data is None: 
result =", "} return_code = 404 else: tdmap_link = \"{0}api/tdmaps/{1}\".format( request.host_url, tdmapid ) result =", "urllib.request import urllib.error import urllib.parse import os import json import sys import ase.io.cif", "\"data\" : data, \"links\" : { \"tdmap\" : { \"self\" : tdmap_link }", "{ \"self\" : tdmap_link } }, } return_code = 200 return jsonpify(result), return_code", "as plt import sys #so that we can import globals sys.path.append('../..') from globals", "tdmapsDBSchema): inserted_key = None sqlite3_conn = create_or_open_db(tdmapsDBPath, tdmapsDBSchema) sql = '''insert into tdmaps", "= None data_dict = None if len(request.data) > 0: data_dict = json.loads( request.data", "that we can import globals sys.path.append('../..') from globals import * #backgroud save_cells_unitcells_data job", "get_tdmaps_data( id, tdmapsDBPath, tdmapsDBSchema ): result = None inserted_key = None sqlite3_conn =", "##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name of module __name__, template_folder='templates' # templates", "sqlite3_conn.close() return result ##################################################### ##################### BLUEPRINT ##################### ##################################################### tdmaps = Blueprint( 'tdmaps', #name" ]
[ "Ss = [] for m in range(depth): U = Us[m] S = np.sum(U,", "den, N): omegas = np.array(range(N)) return gauss(omegas - center, den) def gauss(omega, den):", "phi[0] = 1.0 filterbank[:, 0, -1] = phi for m in range(dim): filterbank", "np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, den, N): omegas = np.array(range(N)) return gauss(omegas", "j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi", "rev_psi sigma_phi = 2.0 * sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi =", "in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1],", "omega_start = p_start * N omega_stop = p_stop * N omegas = np.array(range(omega_start,", "range(p_start, p_stop) b = np.array([gauss(p*N - center, den) for p in p_range]) A", "for m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm)", "nonlinearity == \"abs\": U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S =", "xi_j * N den = 2 * sigma_j * sigma_j * N *", "den, N, n_periods=4) filterbank[:, 0, j] = psi for j in range(J_fr-2, 2*(J_fr-2)):", "center = xi_j * N den = 2 * sigma_j * sigma_j *", "Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr = [] for m", "rev_phi phi[0] = 1.0 filterbank[:, 0, -1] = phi for m in range(dim):", "def gabor(center, den, N): omegas = np.array(range(N)) return gauss(omegas - center, den) def", "in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, den, N): omegas", "1.0 filterbank[:, 0, -1] = phi for m in range(dim): filterbank = np.expand_dims(filterbank,", "n_periods=4) filterbank[:, 0, j] = psi for j in range(J_fr-2, 2*(J_fr-2)): psi =", "- np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y =", "0.0 den_phi = sigma_phi * 
sigma_phi * N * N phi = gabor(center_phi,", "U = Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m]", "for j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j - (J_fr-2)] rev_psi", "rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi phi[0] = 1.0 filterbank[:,", "numpy as np import os from scipy.fftpack import fft, ifft def get_composers(): return", "for j in range(J_fr-2): xi_j = xi * 2**(-j) sigma_j = sigma *", "temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us = [pianoroll] Ss = [] for", "= np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S)", "range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1]))", "1 p_start = - ((n_periods-1) >> 1) - is_even(n_periods) p_stop = ((n_periods-1) >>", "= filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if", "setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr = [] for m in range(depth):", "* 2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi * sigma_phi * N *", "m21 import numpy as np import os from scipy.fftpack import fft, ifft def", "return (n%2 == 0) def morlet(center, den, N, n_periods): half_N = N >>", "= frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16):", "filterbanks_tm = [] filterbanks_fr = [] for m in range(depth): filterbank_tm = temporal_filterbank(2*m,", "= rev_psi sigma_phi = 2.0 * sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi", "p in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y", "1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\": U = np.abs(Y_fr)", "[\"Haydn\", 
\"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth):", "return \"nemisig2018\" def concatenate_layers(Sx, depth): layers = [] for m in range(depth+1): layers.append(Sx[m].flatten())", "axis=-1) Y_ft = U_ft * filterbank Y = ifft(Y_ft, axis=dim) return Y def", "ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr =", "raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def", "get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def", "\"abs\": U = np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0,", "Y = scatter(U, filterbank, 1) if nonlinearity == \"abs\": U = np.abs(Y) else:", "= np.sum(U, axis=(0, 1)) filterbank = filterbanks[m] Y = scatter(U, filterbank, 1) if", "y = np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0) return y def scatter(U,", "dim): U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft *", "for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks,", "= [] filterbanks_fr = [] for m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm)", "for j in range(J_tm-2): xi_j = xi * 2**(-j) sigma_j = sigma *", "n_periods): half_N = N >> 1 p_start = - ((n_periods-1) >> 1) -", "range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, den, N): omegas =", "N, n_periods): half_N = N >> 1 p_start = - ((n_periods-1) >> 1)", "n_periods)) for p in range(n_periods): offset = (p_start + p) * N corrective_gaussians[:,", "axis=(0, 1)) filterbank = filterbanks[m] Y = scatter(U, filterbank, 1) if nonlinearity ==", "den): return np.exp(- omega*omega / den) def 
is_even(n): return (n%2 == 0) def", "omega*omega / den) def is_even(n): return (n%2 == 0) def morlet(center, den, N,", ">> 1) + 1 omega_start = p_start * N omega_stop = p_stop *", "np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return", "den) def is_even(n): return (n%2 == 0) def morlet(center, den, N, n_periods): half_N", "y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y, (n_periods,", "= filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm,", "half_N = N >> 1 p_start = - ((n_periods-1) >> 1) - is_even(n_periods)", "= len(filterbanks_tm) Us = [pianoroll] Ss = [] for m in range(depth): U", "np.array([gauss(p*N - center, den) for p in p_range]) A = np.array([gauss((q-p)*N, den) for", "+ 1 omega_start = p_start * N omega_stop = p_stop * N omegas", "= np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi = 2.0 * sigma", "return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us = [pianoroll] Ss", "get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth): layers = [] for m in range(depth+1):", "as m21 import numpy as np import os from scipy.fftpack import fft, ifft", "= xi_j * N den = 2 * sigma_j * sigma_j * N", "0, j] = rev_psi sigma_phi = 2.0 * sigma * 2**(-(J_fr-2)) center_phi =", "2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for j in range(J_tm-2): xi_j = xi", "* N psi = morlet(center, den, N, n_periods=4) filterbank[:, 0, j] = psi", "= [pianoroll] Ss = [] for m in range(depth): U = Us[m] S", "= (p_start + p) * N corrective_gaussians[:, p] = gauss(omegas - offset, den)", "Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm =", "phi + rev_phi phi[0] = 1.0 filterbank[:, 0, -1] = phi for m", "return gauss(omegas - center, den) def gauss(omega, 
den): return np.exp(- omega*omega / den)", "fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank Y =", "range(depth): U = Us[m] S = np.sum(U, axis=(0, 1)) filterbank = filterbanks[m] Y", "return np.exp(- omega*omega / den) def is_even(n): return (n%2 == 0) def morlet(center,", "offset, den) p_range = range(p_start, p_stop) b = np.array([gauss(p*N - center, den) for", "np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset = (p_start + p) * N", "1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j = xi * 2**(-j) sigma_j =", "N psi = morlet(center, den, N, n_periods=4) filterbank[:, 0, j] = psi for", "= 1.0 filterbank[:, 0, -1] = phi for m in range(dim): filterbank =", "den) def gauss(omega, den): return np.exp(- omega*omega / den) def is_even(n): return (n%2", "N phi = gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi", "p_start * N omega_stop = p_stop * N omegas = np.array(range(omega_start, omega_stop)) gauss_center", "axis=0) return y def scatter(U, filterbank, dim): U_ft = fft(U, axis=dim) U_ft =", "gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset", "j] = rev_psi sigma_phi = 2.0 * sigma * 2**(-(J_fr-2)) center_phi = 0.0", "A = np.array([gauss((q-p)*N, den) for p in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods)", "import numpy as np import os from scipy.fftpack import fft, ifft def get_composers():", "import os from scipy.fftpack import fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def", "= phi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def", "rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi = 2.0 *", "= fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank Y", "= np.array([gauss(p*N - center, den) for p in p_range]) A = np.array([gauss((q-p)*N, den)", "sigma_j * N 
* N psi = morlet(center, den, N, n_periods=4) filterbank[0, :,", "def morlet(center, den, N, n_periods): half_N = N >> 1 p_start = -", "xi * 2**(-j) sigma_j = sigma * 2**(-j) center = xi_j * N", "n_periods) corrective_factors = np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y =", "scatter(U, filterbank, 1) if nonlinearity == \"abs\": U = np.abs(Y) else: raise NotImplementedError", "= 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j =", "offset = (p_start + p) * N corrective_gaussians[:, p] = gauss(omegas - offset,", "N psi = morlet(center, den, N, n_periods=4) filterbank[0, :, j] = psi for", "axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr", "\"abs\": U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0,", "def is_even(n): return (n%2 == 0) def morlet(center, den, N, n_periods): half_N =", "if nonlinearity == \"abs\": U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S", "return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank = np.zeros((N,", "phi[1:][::-1])) phi = phi + rev_phi phi[0] = 1.0 filterbank[:, 0, -1] =", "* 2**(-j) sigma_j = sigma * 2**(-j) center = xi_j * N den", "omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods,", "N, n_periods=4) filterbank[:, 0, j] = psi for j in range(J_fr-2, 2*(J_fr-2)): psi", "range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y = gauss_center", "fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name():", "filterbank = np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, 
den, N): omegas = np.array(range(N))", "2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j = xi", "J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm,", "b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y,", "depth): layers = [] for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim,", "in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return", "range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth =", "+ p) * N corrective_gaussians[:, p] = gauss(omegas - offset, den) p_range =", "den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi phi[0] =", "- (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi =", "corrective_factors = np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y)", "import Memory import math import music21 as m21 import numpy as np import", "filterbank_fr, 0) if nonlinearity == \"abs\": U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U)", "U_ft * filterbank Y = ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth):", "axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us = [pianoroll]", "N omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center, den) corrective_gaussians =", "filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 
2**J_tm filterbank = np.zeros((1, N,", "y = np.sum(y, axis=0) return y def scatter(U, filterbank, dim): U_ft = fft(U,", "= psi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def", "= gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi", "j in range(J_fr-2): xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j)", "range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank", "1) + 1 omega_start = p_start * N omega_stop = p_stop * N", "np.sum(U, axis=(0, 1)) filterbank = filterbanks[m] Y = scatter(U, filterbank, 1) if nonlinearity", "depth = len(filterbanks_tm) Us = [pianoroll] Ss = [] for m in range(depth):", "j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j - (J_fr-2)] rev_psi =", "Y_tm = scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity ==", "np import os from scipy.fftpack import fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"]", "den) for p in p_range]) A = np.array([gauss((q-p)*N, den) for p in range(n_periods)", "filterbank = np.zeros((1, N, J_tm-2)) for j in range(J_tm-2): xi_j = xi *", "morlet(center, den, N, n_periods=4) filterbank[0, :, j] = psi for m in range(dim):", "from scipy.fftpack import fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return", "= np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset = (p_start + p) *", "filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll] Ss = [] for", "is_even(n): return (n%2 == 0) def morlet(center, den, N, n_periods): half_N = N", "xi=0.4, sigma=0.16): N = 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j in", "filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\": U =", "* N * N psi = morlet(center, den, N, n_periods=4) filterbank[:, 0, j]", "2**(-j) center = xi_j * N den 
= 2 * sigma_j * sigma_j", "N * N psi = morlet(center, den, N, n_periods=4) filterbank[:, 0, j] =", "= - ((n_periods-1) >> 1) - is_even(n_periods) p_stop = ((n_periods-1) >> 1) +", "1) - is_even(n_periods) p_stop = ((n_periods-1) >> 1) + 1 omega_start = p_start", "* sigma_j * sigma_j * N * N psi = morlet(center, den, N,", "= np.zeros((1, N, J_tm-2)) for j in range(J_tm-2): xi_j = xi * 2**(-j)", "nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll]", "= filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll] Ss = [] for m", "scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\": U", "in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth", "= temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def", "frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for", "den) for p in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A,", "j in range(J_tm-2): xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j)", "0, j] = psi for j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0,", "from joblib import Memory import math import music21 as m21 import numpy as", "if nonlinearity == \"abs\": U = np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S", "np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank = np.zeros((N, 1,", "= ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr", "morlet(center, den, N, n_periods): half_N = N >> 1 p_start = - ((n_periods-1)", "= scatter(U, 
filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\":", "N = 2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for j in range(J_tm-2): xi_j", "[pianoroll] Ss = [] for m in range(depth): U = Us[m] S =", "(p_start + p) * N corrective_gaussians[:, p] = gauss(omegas - offset, den) p_range", "psi = morlet(center, den, N, n_periods=4) filterbank[:, 0, j] = psi for j", "N, n_periods=4) filterbank[0, :, j] = psi for m in range(dim): filterbank =", "scatter(U, filterbank, dim): U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft =", "np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0) return y def scatter(U, filterbank, dim):", "corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset = (p_start + p)", "in range(n_periods): offset = (p_start + p) * N corrective_gaussians[:, p] = gauss(omegas", "temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for", "gauss(omegas - offset, den) p_range = range(p_start, p_stop) b = np.array([gauss(p*N - center,", "np.exp(- omega*omega / den) def is_even(n): return (n%2 == 0) def morlet(center, den,", "np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return", "filterbanks_fr = [] for m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr =", "= filterbanks[m] Y = scatter(U, filterbank, 1) if nonlinearity == \"abs\": U =", "range(depth): U = Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr =", "gauss_center = gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in", "= np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0) return y def scatter(U, filterbank,", "2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j = xi * 2**(-j) sigma_j = sigma", "= [] for m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1,", "import 
music21 as m21 import numpy as np import os from scipy.fftpack import", "= scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\": U = np.abs(Y_fr) else: raise", "= len(filterbanks) Us = [pianoroll] Ss = [] for m in range(depth): U", "get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth): layers = []", "music21 as m21 import numpy as np import os from scipy.fftpack import fft,", "Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity):", "def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth): layers = [] for m in", "den) p_range = range(p_start, p_stop) b = np.array([gauss(p*N - center, den) for p", "nonlinearity == \"abs\": U = np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S =", "center_phi = 0.0 den_phi = sigma_phi * sigma_phi * N * N phi", "= np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods))", "in range(depth): U = Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr", "omegas = np.array(range(N)) return gauss(omegas - center, den) def gauss(omega, den): return np.exp(-", "import math import music21 as m21 import numpy as np import os from", "xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j) center = xi_j", "np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi phi[0] = 1.0 filterbank[:, 0, -1]", "((n_periods-1) >> 1) + 1 omega_start = p_start * N omega_stop = p_stop", "-1] = phi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank", ">> 1 p_start = - ((n_periods-1) >> 1) - is_even(n_periods) p_stop = ((n_periods-1)", "return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 2**J_tm filterbank =", "Us[m] S = np.sum(U, axis=(0, 1)) filterbank = 
filterbanks[m] Y = scatter(U, filterbank,", "p_stop) b = np.array([gauss(p*N - center, den) for p in p_range]) A =", "temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim,", "os from scipy.fftpack import fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir():", "= ((n_periods-1) >> 1) + 1 omega_start = p_start * N omega_stop =", "(n_periods, N)) y = np.sum(y, axis=0) return y def scatter(U, filterbank, dim): U_ft", "filterbank[:, 0, -1] = phi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2)", "p_range = range(p_start, p_stop) b = np.array([gauss(p*N - center, den) for p in", "sigma_phi * sigma_phi * N * N phi = gabor(center_phi, den_phi, N) rev_phi", "U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank", "2*(J_fr-2)): psi = filterbank[:, 0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:,", "np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0) return y def", "/ den) def is_even(n): return (n%2 == 0) def morlet(center, den, N, n_periods):", "U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank Y = ifft(Y_ft, axis=dim)", "def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\"", "= N >> 1 p_start = - ((n_periods-1) >> 1) - is_even(n_periods) p_stop", "[] for m in range(depth): U = Us[m] S = np.sum(U, axis=(0, 1))", "U = np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1))", "S = np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm", "j] = psi for j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j", "U = Us[m] S = np.sum(U, axis=(0, 
1)) filterbank = filterbanks[m] Y =", "center, den) def gauss(omega, den): return np.exp(- omega*omega / den) def is_even(n): return", "np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y = np.sum(y,", "layers = [] for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr,", "filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us = [pianoroll] Ss =", "filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us =", "* sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi * sigma_phi *", "gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y", "filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 2**J_tm filterbank", "m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity):", "axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr = []", "= np.array(range(N)) return gauss(omegas - center, den) def gauss(omega, den): return np.exp(- omega*omega", "S = np.sum(U, axis=(0, 1)) filterbank = filterbanks[m] Y = scatter(U, filterbank, 1)", "= p_stop * N omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center,", "n_periods=4) filterbank[0, :, j] = psi for m in range(dim): filterbank = np.expand_dims(filterbank,", "range(J_fr-2): xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j) center =", "filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll] Ss", "Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, 
filterbanks,", "frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N", "psi = morlet(center, den, N, n_periods=4) filterbank[0, :, j] = psi for m", "* N corrective_gaussians[:, p] = gauss(omegas - offset, den) p_range = range(p_start, p_stop)", "= [] for m in range(depth): U = Us[m] S = np.sum(U, axis=(0,", "gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi phi[0]", "m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N =", "1 omega_start = p_start * N omega_stop = p_stop * N omegas =", "== \"abs\": U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U,", "p in p_range]) A = np.array([gauss((q-p)*N, den) for p in range(n_periods) for q", "def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth): layers =", "psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi = 2.0 * sigma * 2**(-(J_fr-2))", "N omega_stop = p_stop * N omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas", "return filterbank def gabor(center, den, N): omegas = np.array(range(N)) return gauss(omegas - center,", "depth = len(filterbanks) Us = [pianoroll] Ss = [] for m in range(depth):", "np.sum(y, axis=0) return y def scatter(U, filterbank, dim): U_ft = fft(U, axis=dim) U_ft", "np.array([gauss((q-p)*N, den) for p in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors =", "def concatenate_layers(Sx, depth): layers = [] for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers)", "filterbanks[m] Y = scatter(U, filterbank, 1) if nonlinearity == \"abs\": U = np.abs(Y)", "xi=0.4, sigma=0.16): N = 
2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for j in", "filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll] Ss = [] for m in", "= filterbank[:, 0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j]", "omega_stop)) gauss_center = gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p", "= np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j = xi * 2**(-j)", "def gauss(omega, den): return np.exp(- omega*omega / den) def is_even(n): return (n%2 ==", "* sigma_j * N * N psi = morlet(center, den, N, n_periods=4) filterbank[:,", "filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4,", "S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U,", "= np.array([gauss((q-p)*N, den) for p in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors", "y = np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0) return", "= np.sum(y, axis=0) return y def scatter(U, filterbank, dim): U_ft = fft(U, axis=dim)", "p_stop * N omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center, den)", "filterbank[:, 0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] =", "m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr)", "J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j", "2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi * sigma_phi * N * N", "Us = [pianoroll] Ss = [] for m in range(depth): U = Us[m]", "den, N, n_periods): half_N = N >> 1 p_start = - ((n_periods-1) >>", "axis=(0,1)) filterbank_tm = 
filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr", "N): omegas = np.array(range(N)) return gauss(omegas - center, den) def gauss(omega, den): return", "in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr", "Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth =", "den_phi = sigma_phi * sigma_phi * N * N phi = gabor(center_phi, den_phi,", "np.array(range(N)) return gauss(omegas - center, den) def gauss(omega, den): return np.exp(- omega*omega /", "nonlinearity): depth = len(filterbanks) Us = [pianoroll] Ss = [] for m in", "sigma * 2**(-j) center = xi_j * N den = 2 * sigma_j", "omega_stop = p_stop * N omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas -", "m in range(depth): U = Us[m] S = np.sum(U, axis=(0, 1)) filterbank =", "== 0) def morlet(center, den, N, n_periods): half_N = N >> 1 p_start", "Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1]", "concatenate_layers(Sx, depth): layers = [] for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def", "for p in range(n_periods): offset = (p_start + p) * N corrective_gaussians[:, p]", "2 * sigma_j * sigma_j * N * N psi = morlet(center, den,", "= psi for j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j -", "U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1))", "N corrective_gaussians[:, p] = gauss(omegas - offset, den) p_range = range(p_start, p_stop) b", "np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y =", "filterbank, dim): U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft", 
"range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y", "in p_range]) A = np.array([gauss((q-p)*N, den) for p in range(n_periods) for q in", "N den = 2 * sigma_j * sigma_j * N * N psi", "as np import os from scipy.fftpack import fft, ifft def get_composers(): return [\"Haydn\",", "sigma=0.16): N = 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2):", "q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians,", "return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth", "* filterbank Y = ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm", "layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank =", "1) if nonlinearity == \"abs\": U = np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S)", "phi = phi + rev_phi phi[0] = 1.0 filterbank[:, 0, -1] = phi", "gabor(center, den, N): omegas = np.array(range(N)) return gauss(omegas - center, den) def gauss(omega,", "filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr,", "is_even(n_periods) p_stop = ((n_periods-1) >> 1) + 1 omega_start = p_start * N", "N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi phi[0] = 1.0", "filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 2**J_tm", "J_tm, xi=0.4, sigma=0.16): N = 2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for j", "den, N, n_periods=4) filterbank[0, :, j] = psi for m in range(dim): filterbank", "return y def 
scatter(U, filterbank, dim): U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft,", "\"nemisig2018\" def concatenate_layers(Sx, depth): layers = [] for m in range(depth+1): layers.append(Sx[m].flatten()) return", "* N * N phi = gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1]))", "gauss(omegas - center, den) def gauss(omega, den): return np.exp(- omega*omega / den) def", "filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j = xi *", "m in range(depth): U = Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m]", "= np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us", "1)) filterbank = filterbanks[m] Y = scatter(U, filterbank, 1) if nonlinearity == \"abs\":", "[] for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16):", "\"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth): layers", "= gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y, (n_periods, N))", "0, -1] = phi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return", "depth): filterbanks_tm = [] filterbanks_fr = [] for m in range(depth): filterbank_tm =", "+ rev_phi phi[0] = 1.0 filterbank[:, 0, -1] = phi for m in", "ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return", "np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for", "Y = ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = []", "return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): 
return \"nemisig2018\" def concatenate_layers(Sx, depth): layers = [] for", "[] for m in range(depth): U = Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm", "= U_ft * filterbank Y = ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr,", "for m in range(depth): U = Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm =", "= [] for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4,", "filterbank[:, 0, j] = psi for j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:,", "= np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors) y = np.fft.fftshift(y) y", "filterbank[:, 0, j] = rev_psi sigma_phi = 2.0 * sigma * 2**(-(J_fr-2)) center_phi", "in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y = gauss_center - np.dot(corrective_gaussians, corrective_factors)", "return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr = [] for", "== \"abs\": U = np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U,", "1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr =", "filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity", "np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us =", "= 2.0 * sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi *", "N * N phi = gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi", "= gauss(omegas - center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in range(n_periods):", "* N omega_stop = p_stop * N omegas = np.array(range(omega_start, omega_stop)) gauss_center =", "(J_fr-2)] rev_psi = np.concatenate((psi[0:1], 
psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi = 2.0", "0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] = rev_psi", "sigma_j * sigma_j * N * N psi = morlet(center, den, N, n_periods=4)", "m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, den, N):", "N)) y = np.sum(y, axis=0) return y def scatter(U, filterbank, dim): U_ft =", "for m in range(depth+1): layers.append(Sx[m].flatten()) return np.concatenate(layers) def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N", "filterbank Y = ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm =", "- center, den) for p in p_range]) A = np.array([gauss((q-p)*N, den) for p", "= sigma_phi * sigma_phi * N * N phi = gabor(center_phi, den_phi, N)", "for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, den,", "for p in p_range]) A = np.array([gauss((q-p)*N, den) for p in range(n_periods) for", "j] = psi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank", "scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\": U = np.abs(Y_fr) else: raise NotImplementedError", "center, den) for p in p_range]) A = np.array([gauss((q-p)*N, den) for p in", "p_stop = ((n_periods-1) >> 1) + 1 omega_start = p_start * N omega_stop", "for m in range(depth): U = Us[m] S = np.sum(U, axis=(0, 1)) filterbank", "timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us", "= range(p_start, p_stop) b = np.array([gauss(p*N - center, den) for p in p_range])", "= np.abs(Y) else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S)", "import fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def", "= 
morlet(center, den, N, n_periods=4) filterbank[0, :, j] = psi for m in", "(n%2 == 0) def morlet(center, den, N, n_periods): half_N = N >> 1", "y def scatter(U, filterbank, dim): U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1)", "corrective_gaussians[:, p] = gauss(omegas - offset, den) p_range = range(p_start, p_stop) b =", "(filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 2**J_tm filterbank = np.zeros((1,", "[] filterbanks_fr = [] for m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr", "* N phi = gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi =", "= Us[m] S = np.sum(U, axis=(0, 1)) filterbank = filterbanks[m] Y = scatter(U,", "* 2**(-j) center = xi_j * N den = 2 * sigma_j *", ">> 1) - is_even(n_periods) p_stop = ((n_periods-1) >> 1) + 1 omega_start =", "len(filterbanks_tm) Us = [pianoroll] Ss = [] for m in range(depth): U =", "= 2 * sigma_j * sigma_j * N * N psi = morlet(center,", "sigma_phi = 2.0 * sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi", "= np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi + rev_phi phi[0] = 1.0 filterbank[:, 0,", "= p_start * N omega_stop = p_stop * N omegas = np.array(range(omega_start, omega_stop))", "N * N psi = morlet(center, den, N, n_periods=4) filterbank[0, :, j] =", "J_fr, depth): filterbanks_tm = [] filterbanks_fr = [] for m in range(depth): filterbank_tm", "def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm)", "psi for j in range(J_fr-2, 2*(J_fr-2)): psi = filterbank[:, 0, j - (J_fr-2)]", "psi = filterbank[:, 0, j - (J_fr-2)] rev_psi = np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0,", "- ((n_periods-1) >> 1) - is_even(n_periods) p_stop = ((n_periods-1) >> 1) + 1", ":, j] = psi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return", "math import 
music21 as m21 import numpy as np import os from scipy.fftpack", "scipy.fftpack import fft, ifft def get_composers(): return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\"", "sigma_j = sigma * 2**(-j) center = xi_j * N den = 2", "p] = gauss(omegas - offset, den) p_range = range(p_start, p_stop) b = np.array([gauss(p*N", "= np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank Y = ifft(Y_ft, axis=dim) return", "- is_even(n_periods) p_stop = ((n_periods-1) >> 1) + 1 omega_start = p_start *", "def scatter(U, filterbank, dim): U_ft = fft(U, axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft", "p in range(n_periods): offset = (p_start + p) * N corrective_gaussians[:, p] =", "filterbank = filterbanks[m] Y = scatter(U, filterbank, 1) if nonlinearity == \"abs\": U", "= Us[m] S = np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm", "= np.expand_dims(filterbank, axis=2) return filterbank def gabor(center, den, N): omegas = np.array(range(N)) return", "NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll,", "for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y = gauss_center -", "phi = gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1], phi[1:][::-1])) phi = phi +", "def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N = 2**J_tm filterbank = np.zeros((1, N, J_tm-2))", "J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr) def temporal_filterbank(dim, J_tm, xi=0.4, sigma=0.16): N =", "filterbank = np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks)", "* N * N psi = morlet(center, den, N, n_periods=4) filterbank[0, :, j]", "sigma_j * N * N psi = morlet(center, den, N, n_periods=4) 
filterbank[:, 0,", "J_tm-2)) for j in range(J_tm-2): xi_j = xi * 2**(-j) sigma_j = sigma", "filterbank, 1) if nonlinearity == \"abs\": U = np.abs(Y) else: raise NotImplementedError Us.append(U)", "p) * N corrective_gaussians[:, p] = gauss(omegas - offset, den) p_range = range(p_start,", "filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr = scatter(Y_tm, filterbank_fr, 0)", "range(J_tm-2): xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j) center =", "N, J_tm-2)) for j in range(J_tm-2): xi_j = xi * 2**(-j) sigma_j =", "filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm, filterbanks_fr)", "gauss(omega, den): return np.exp(- omega*omega / den) def is_even(n): return (n%2 == 0)", "* N den = 2 * sigma_j * sigma_j * N * N", "len(filterbanks) Us = [pianoroll] Ss = [] for m in range(depth): U =", "= [] for m in range(depth): U = Us[m] S = np.sum(U, axis=(0,1))", "sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi * sigma_phi * N", "np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j = xi * 2**(-j) sigma_j", "= filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll] Ss =", "p_range]) A = np.array([gauss((q-p)*N, den) for p in range(n_periods) for q in range(n_periods)]).reshape(n_periods,", "filterbanks, nonlinearity): depth = len(filterbanks) Us = [pianoroll] Ss = [] for m", "N = 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1)) for j in range(J_fr-2): xi_j", "center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset = (p_start", "- center, den) def gauss(omega, den): return np.exp(- omega*omega / den) def is_even(n):", "phi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def gabor(center,", "- offset, den) p_range = range(p_start, p_stop) b = np.array([gauss(p*N - center, 
den)", "np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank Y = ifft(Y_ft, axis=dim) return Y", "((n_periods-1) >> 1) - is_even(n_periods) p_stop = ((n_periods-1) >> 1) + 1 omega_start", "2**(-j) sigma_j = sigma * 2**(-j) center = xi_j * N den =", "in range(depth): U = Us[m] S = np.sum(U, axis=(0, 1)) filterbank = filterbanks[m]", "return [\"Haydn\", \"Mozart\"] def get_data_dir(): return \"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx,", "sigma=0.16): N = 2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for j in range(J_tm-2):", "2.0 * sigma * 2**(-(J_fr-2)) center_phi = 0.0 den_phi = sigma_phi * sigma_phi", "0) if nonlinearity == \"abs\": U = np.abs(Y_fr) else: raise NotImplementedError Us.append(U) Ss.append(S)", "[] for m in range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr)", "= phi + rev_phi phi[0] = 1.0 filterbank[:, 0, -1] = phi for", "sigma_phi * N * N phi = gabor(center_phi, den_phi, N) rev_phi = np.concatenate((phi[0:1],", "\"/scratch/vl1019/nemisig2018_data\" def get_dataset_name(): return \"nemisig2018\" def concatenate_layers(Sx, depth): layers = [] for m", "psi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2) return filterbank def temporal_scattering(pianoroll,", "range(n_periods): offset = (p_start + p) * N corrective_gaussians[:, p] = gauss(omegas -", "* sigma_j * N * N psi = morlet(center, den, N, n_periods=4) filterbank[0,", "= np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0) return y", "= 2**J_tm filterbank = np.zeros((1, N, J_tm-2)) for j in range(J_tm-2): xi_j =", "def temporal_scattering(pianoroll, filterbanks, nonlinearity): depth = len(filterbanks) Us = [pianoroll] Ss = []", "= morlet(center, den, N, n_periods=4) filterbank[:, 0, j] = psi for j in", "= np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): 
filterbanks_tm =", "= scatter(U, filterbank, 1) if nonlinearity == \"abs\": U = np.abs(Y) else: raise", "= xi * 2**(-j) sigma_j = sigma * 2**(-j) center = xi_j *", "corrective_factors) y = np.fft.fftshift(y) y = np.reshape(y, (n_periods, N)) y = np.sum(y, axis=0)", "filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1) Y_fr =", "def frequential_filterbank(dim, J_fr, xi=0.4, sigma=0.16): N = 2**J_fr filterbank = np.zeros((N, 1, 2*(J_fr-2)+1))", "filterbanks[0] filterbanks_fr = filterbanks[1] depth = len(filterbanks_tm) Us = [pianoroll] Ss = []", "= gauss(omegas - offset, den) p_range = range(p_start, p_stop) b = np.array([gauss(p*N -", "morlet(center, den, N, n_periods=4) filterbank[:, 0, j] = psi for j in range(J_fr-2,", "else: raise NotImplementedError Us.append(U) Ss.append(S) S = np.sum(U, axis=(0, 1)) Ss.append(S) return Ss", "in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b) y =", "joblib import Memory import math import music21 as m21 import numpy as np", "np.sum(U, axis=(0, 1)) Ss.append(S) return Ss def timefrequency_scattering(pianoroll, filterbanks, nonlinearity): filterbanks_tm = filterbanks[0]", "den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset = (p_start +", "N >> 1 p_start = - ((n_periods-1) >> 1) - is_even(n_periods) p_stop =", "np.sum(U, axis=(0,1)) filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm, 1)", "Memory import math import music21 as m21 import numpy as np import os", "np.concatenate((psi[0:1], psi[1:][::-1])) filterbank[:, 0, j] = rev_psi sigma_phi = 2.0 * sigma *", "= 0.0 den_phi = sigma_phi * sigma_phi * N * N phi =", "= sigma * 2**(-j) center = xi_j * N den = 2 *", "for p in range(n_periods) for q in range(n_periods)]).reshape(n_periods, n_periods) corrective_factors = np.linalg.solve(A, b)", "= np.sum(U, axis=(0,1)) 
filterbank_tm = filterbanks_tm[m] filterbank_fr = filterbanks_fr[m] Y_tm = scatter(U, filterbank_tm,", "* N omegas = np.array(range(omega_start, omega_stop)) gauss_center = gauss(omegas - center, den) corrective_gaussians", "Y_fr = scatter(Y_tm, filterbank_fr, 0) if nonlinearity == \"abs\": U = np.abs(Y_fr) else:", "in range(J_fr-2): xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j) center", "* N psi = morlet(center, den, N, n_periods=4) filterbank[0, :, j] = psi", "filterbank def gabor(center, den, N): omegas = np.array(range(N)) return gauss(omegas - center, den)", "axis=2) return filterbank def gabor(center, den, N): omegas = np.array(range(N)) return gauss(omegas -", "0) def morlet(center, den, N, n_periods): half_N = N >> 1 p_start =", "b = np.array([gauss(p*N - center, den) for p in p_range]) A = np.array([gauss((q-p)*N,", "axis=dim) U_ft = np.expand_dims(U_ft, axis=-1) Y_ft = U_ft * filterbank Y = ifft(Y_ft,", "np.zeros((1, N, J_tm-2)) for j in range(J_tm-2): xi_j = xi * 2**(-j) sigma_j", "p_start = - ((n_periods-1) >> 1) - is_even(n_periods) p_stop = ((n_periods-1) >> 1)", "def setup_timefrequency_scattering(J_tm, J_fr, depth): filterbanks_tm = [] filterbanks_fr = [] for m in", "in range(J_tm-2): xi_j = xi * 2**(-j) sigma_j = sigma * 2**(-j) center", "Y_ft = U_ft * filterbank Y = ifft(Y_ft, axis=dim) return Y def setup_timefrequency_scattering(J_tm,", "* sigma_phi * N * N phi = gabor(center_phi, den_phi, N) rev_phi =", "filterbank[0, :, j] = psi for m in range(dim): filterbank = np.expand_dims(filterbank, axis=2)", "- center, den) corrective_gaussians = np.zeros((N*n_periods, n_periods)) for p in range(n_periods): offset =", "den = 2 * sigma_j * sigma_j * N * N psi =", "range(depth): filterbank_tm = temporal_filterbank(2*m, J_tm) filterbank_fr = frequential_filterbank(2*m+1, J_fr) filterbanks_tm.append(filterbank_tm) filterbanks_fr.append(filterbank_fr) return (filterbanks_tm," ]
[ "response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response = self.fetch(", "method=\"GET\") assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200", "def get_app(self): extension = Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest):", "self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>URL", "<filename>tests/test_web.py from unittest import mock import tornado.testing import tornado.web import tornado.websocket import mopidy.config", "from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config =", "from unittest import mock import tornado.testing import tornado.web import tornado.websocket import mopidy.config as", "class WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code == 200 response", "mopidy.config as config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension =", "tornado.web import tornado.websocket import mopidy.config as config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase):", "tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code", "import tornado.websocket import mopidy.config as config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def", "return tornado.web.Application(extension.factory(self.config, mock.Mock())) class 
WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code", "import mopidy.config as config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension", "200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\",", "\"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>URL Added</title>\"", "assert \"<title>Error</title>\" in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code ==", "config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config", "method=\"GET\") assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response", ") assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>URL Added</title>\" in body", "self.fetch(\"/\", method=\"GET\") assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code ==", "config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert", "response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body =", "get_app(self): extension = Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def", "self.config = config.Proxy({}) return 
tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\",", "mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code == 200", "mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config = config.Proxy({})", "200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert", "extension = Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self):", "== 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body = tornado.escape.to_unicode(response.body)", "= self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert", "in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200 body", "Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response =", "def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\",", "tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code ==", "self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body = tornado.escape.to_unicode(response.body) 
assert \"<title>Error</title>\" in body", "body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", )", "test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\")", "assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body", "Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config,", "WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\") assert response.code == 200 response =", "import mock import tornado.testing import tornado.web import tornado.websocket import mopidy.config as config from", "= self.fetch(\"/\", method=\"GET\") assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code", "response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\"", "response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200 body = tornado.escape.to_unicode(response.body)", "\"<title>Error</title>\" in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200", "method=\"GET\", ) assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>URL Added</title>\" in", "body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert response.code == 200 
body =", "import tornado.web import tornado.websocket import mopidy.config as config from mopidy_bandcamp import Extension class", "import tornado.testing import tornado.web import tornado.websocket import mopidy.config as config from mopidy_bandcamp import", "as config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension()", "= self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in", "class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock()))", "== 200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response = self.fetch( \"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\",", "unittest import mock import tornado.testing import tornado.web import tornado.websocket import mopidy.config as config", "tornado.testing import tornado.web import tornado.websocket import mopidy.config as config from mopidy_bandcamp import Extension", "import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config = config.Proxy({}) return", "mock import tornado.testing import tornado.web import tornado.websocket import mopidy.config as config from mopidy_bandcamp", "= config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response = self.fetch(\"/\", method=\"GET\")", "response = self.fetch(\"/\", method=\"GET\") assert response.code == 200 response = self.fetch(\"/?url=https%3A%2F%2Fgoogle.com%2F\", method=\"GET\") assert", "= tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response = self.fetch( 
\"/?url=https%3A%2F%2Flouiezong.bandcamp.com%2Ftrack%2Fbrain-age\", method=\"GET\", ) assert", "= Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class WebHandlerTest(BaseTest): def test_index(self): response", "BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self): extension = Extension() self.config = config.Proxy({}) return tornado.web.Application(extension.factory(self.config, mock.Mock())) class", "assert response.code == 200 body = tornado.escape.to_unicode(response.body) assert \"<title>Error</title>\" in body response =", "tornado.websocket import mopidy.config as config from mopidy_bandcamp import Extension class BaseTest(tornado.testing.AsyncHTTPTestCase): def get_app(self):" ]
[ "* mats = UnwrapElement(IN[0]) colorlist = list() glowlist = list() classlist = list()", "glowlist = list() classlist = list() shinylist = list() smoothlist = list() translist", "mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass) shinylist.append(mat.Shininess) smoothlist.append(mat.Smoothness) translist.append(mat.Transparency)", "= UnwrapElement(IN[0]) colorlist = list() glowlist = list() classlist = list() shinylist =", "smoothlist = list() translist = list() for mat in mats: colorlist.append(mat.Color) if mat.Glow:", "import * mats = UnwrapElement(IN[0]) colorlist = list() glowlist = list() classlist =", "= list() translist = list() for mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True)", "import clr clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * mats = UnwrapElement(IN[0]) colorlist = list()", "list() smoothlist = list() translist = list() for mat in mats: colorlist.append(mat.Color) if", "list() shinylist = list() smoothlist = list() translist = list() for mat in", "list() glowlist = list() classlist = list() shinylist = list() smoothlist = list()", "list() translist = list() for mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else:", "UnwrapElement(IN[0]) colorlist = list() glowlist = list() classlist = list() shinylist = list()", "mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass) shinylist.append(mat.Shininess) smoothlist.append(mat.Smoothness) translist.append(mat.Transparency) OUT =", "= list() smoothlist = list() translist = list() for mat in mats: colorlist.append(mat.Color)", "for mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass) shinylist.append(mat.Shininess) smoothlist.append(mat.Smoothness)", "colorlist.append(mat.Color) 
if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass) shinylist.append(mat.Shininess) smoothlist.append(mat.Smoothness) translist.append(mat.Transparency) OUT = (classlist,colorlist,glowlist,shinylist,smoothlist,translist)", "from Autodesk.Revit.DB import * mats = UnwrapElement(IN[0]) colorlist = list() glowlist = list()", "translist = list() for mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False)", "clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * mats = UnwrapElement(IN[0]) colorlist = list() glowlist =", "= list() classlist = list() shinylist = list() smoothlist = list() translist =", "in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass) shinylist.append(mat.Shininess) smoothlist.append(mat.Smoothness) translist.append(mat.Transparency) OUT", "clr clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * mats = UnwrapElement(IN[0]) colorlist = list() glowlist", "= list() glowlist = list() classlist = list() shinylist = list() smoothlist =", "colorlist = list() glowlist = list() classlist = list() shinylist = list() smoothlist", "mats = UnwrapElement(IN[0]) colorlist = list() glowlist = list() classlist = list() shinylist", "= list() for mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass)", "list() for mat in mats: colorlist.append(mat.Color) if mat.Glow: glowlist.append(True) else: glowlist.append(False) classlist.append(mat.MaterialClass) shinylist.append(mat.Shininess)", "Autodesk.Revit.DB import * mats = UnwrapElement(IN[0]) colorlist = list() glowlist = list() classlist", "shinylist = list() smoothlist = list() translist = list() for mat in mats:", "= list() shinylist = list() smoothlist = list() translist = list() for mat", "<gh_stars>100-1000 import clr 
clr.AddReference('RevitAPI') from Autodesk.Revit.DB import * mats = UnwrapElement(IN[0]) colorlist =", "list() classlist = list() shinylist = list() smoothlist = list() translist = list()", "classlist = list() shinylist = list() smoothlist = list() translist = list() for" ]
[]
[ "## The generated population may already be vaccinated. def generate(self, n_individuals): \"\"\"Generate a", "if (np.random.uniform() < r[k]): result[i, k] = 0 else: result[i, k] = self.X[t,", "import numpy as np import pickle class Person: def __init__(self, pop): self.genes =", "self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab =", "3 + self.n_genes + self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments,", "this function again generates a completely new population sample, purging the previous one", "self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes + self.comorbidities[1]", "# Initially no symptoms apart from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if", "n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines =", "if (vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if (vaccine ==", "the population vaccine_array (n*|A| array), array indicating which vaccines are to be given", "= 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i +=", "The symptoms of the selected individuals Notes: Currently only one vaccine dose is", "Taste, Fever, Headache, Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as", "self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments,", "self.n_symptoms]) i = 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms", "if no vaccine is given def vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array)", "patient Returns: The symptoms of the selected individuals \"\"\" N = 
len(person_index) result", "t = person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms): if", "vaccine = -1 if no vaccine is given def vaccinate(self, vaccine_array, pop): ##", "int n_individuals: the number of individuals to generate \"\"\" self.n_individuals = n_individuals X", "def treat(self, person_index, treatment): \"\"\" Treat a patient. Args: person_index (int array), indices", "Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste, Fever,", "(self.symptoms[0] == 0 and self.symptoms[1] == 0): self.symptom_baseline = np.array( [0, 0, 0.001,", "p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] = 1", "= 10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G", "person_index (int array), indices of persons in the population to treat treatment_array (n*|A|", "self.comorbidities[5] = self.HT * self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]):", "##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result # main if __name__ ==", "/ 100 # age affects everything self.income = np.random.gamma(1, 10000) self.comorbidities = [0]", "use vaccine = -1 if no vaccine is given def vaccinate(self, vaccine_array, pop):", "pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1):", "pop.heart * self.age_adj self.comorbidities[5] = self.HT * self.age_adj for i in range(pop.n_comorbidities): if", "np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity", "Args: person_index (int array), indices of persons in the population to treat treatment_array", "if (vaccine >= 0): 
self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 # model long", "The generated population may already be vaccinated. def generate(self, n_individuals): \"\"\"Generate a population.", "= X return X def vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine to", "# model long covid sufferers by increasing the chances of various # symptoms", "0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() *", "else: vaccinated = False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1]", "# use t to index the original population # print(treatment) for i in", "self.symptom_baseline[9] += 0.001 if (vaccine == 2): self.symptom_baseline[7] += 0.01 if (vaccine >=", "following structure: ## X: characteristics before treatment, including whether or not they #", "self.n_genes + self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons", "be given to each patient Returns: The symptoms of the selected individuals Notes:", "k] = 0 else: result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\",", "self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] =", "Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as np import pickle class Person:", "k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result # main if __name__", "= self.X[t, k] return result def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender,", "Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild", "0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence = 0.1 
self.historical_prevalence = 0.1", "1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if (vaccine == 2): self.symptom_baseline[7] +=", "0.3 self.smoking = 0.2 self.diabetes = 0.1 self.heart = 0.15 self.htension = 0.3", "1): self.symptom_baseline *= 0.5 # baseline symptoms of non-covid patients if (self.symptoms[0] ==", "were vaccinated ## The generated population may already be vaccinated. def generate(self, n_individuals):", "person_index, vaccine_array): \"\"\" Give a vaccine to a specific person. Args: person_index (int", "1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms): if (k <=", "pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1]", "apart from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0]", "pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom probabilities for", "chances of various # symptoms slightly if (self.symptoms[0] == 1 and self.symptoms[1] ==", "factors self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline)", "class Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities = 6;", "0.01 # Initially no symptoms apart from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms", "= np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s", "0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines", "if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated", "specific person. 
Args: person_index (int array), indices of person in the population vaccine_array", "is implemented, but in the future multiple doses may be modelled. \"\"\" outcome", "person = Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines) if", "X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment", "1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment = pop.generate(n_treated)", "(n*|A| array), array indicating which vaccines are to be given to each patient", "if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] = 0 self.symptom_baseline =", "6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0]", "== 1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine]", "Comborbidities: ## Comborbidities: ## Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension ## Symptom", "0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy", "= np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines", "in range(self.n_symptoms): if (k <= 1): result[t, k] = X[t, k] else: if", "0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence", "size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj = self.age /", "Myocarditis, Blood-Clots import numpy as np import pickle class Person: def __init__(self, pop):", "i 
in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] =", "0.0001]) ## Common side-effects if (vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] +=", "## Common side-effects if (vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001", "n_genes = 128 n_vaccines = 3 n_treatments = 4 pop = Population(n_genes, n_vaccines,", "already be vaccinated. def generate(self, n_individuals): \"\"\"Generate a population. Call this function before", "may be modelled. \"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0 for t", "= 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05,", "<= 1): result[t, k] = X[t, k] else: if (np.random.uniform() < r[k]): result[t,", "n_individuals: the number of individuals to generate \"\"\" self.n_individuals = n_individuals X =", "before treatment, including whether or not they # were vaccinated ## The generated", "[0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] =", "not they # were vaccinated ## The generated population may already be vaccinated.", "numpy as np import pickle class Person: def __init__(self, pop): self.genes = np.random.choice(2,", "0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common side-effects if (vaccine", "Treat a patient. Args: person_index (int array), indices of persons in the population", "but in the future multiple doses may be modelled. 
\"\"\" outcome = np.zeros([len(person_index),", "k] else: if (np.random.uniform() < r[k]): result[t, k] = 0 else: result[t, k]", "1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 # Initially", "def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return", "# main if __name__ == \"__main__\": import pandas try: import policy except: import", "array), indices of person in the population vaccine_array (n*|A| array), array indicating which", "X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv',", "project2.src.covid.policy n_symptoms = 10 n_genes = 128 n_vaccines = 3 n_treatments = 4", "be modelled. \"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0 for t in", "Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death", "person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i += 1 return outcome def treat(self,", "(np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] = 0 self.symptom_baseline = np.array(", "0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0]", "(np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2,", "a vaccine to a specific person. Args: person_index (int array), indices of person", "Give a vaccine to a specific person. 
Args: person_index (int array), indices of", "pop = Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False,", "Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste, Fever, Headache, Stomach ## Critical symptoms:", "* self.age_adj self.comorbidities[5] = self.HT * self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform()", "np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[i, k]", "vaccinated ## The generated population may already be vaccinated. def generate(self, n_individuals): \"\"\"Generate", "if __name__ == \"__main__\": import pandas try: import policy except: import project2.src.covid.policy n_symptoms", "* pop.n_vaccines # use vaccine = -1 if no vaccine is given def", "self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] +=", "indicating which vaccines are to be given to each patient Returns: The symptoms", "X: characteristics before treatment, including whether or not they # were vaccinated ##", "0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline = np.array( np.matrix(self.genes)", "(np.random.uniform() < r[k]): result[t, k] = 0 else: result[t, k] = X[t, k]", "if (self.gender == 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] +=", "= X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments))", "persons in the population to treat treatment_array (n*|A| array), array indicating which treatments", "* 0.5 self.HT = pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4]", "Pneumonia, Myocarditis, Blood-Clots import numpy as np 
import pickle class Person: def __init__(self,", "Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons = [] for t in range(n_individuals): person", "= np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] = x_t", "[0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85]", "self.HT = pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart", "else: result[i, k] = self.X[t, k] return result def get_features(self, person_index): x_t =", "= 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001,", "0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline = np.array(", "pandas try: import policy except: import project2.src.covid.policy n_symptoms = 10 n_genes = 128", "= np.random.gamma(3, 11) self.age_adj = self.age / 100 # age affects everything self.income", "patients if (self.symptoms[0] == 0 and self.symptoms[1] == 0): self.symptom_baseline = np.array( [0,", "[pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines = [0]", "treatments are to be given to each patient Returns: The symptoms of the", "self.historical_prevalence = 0.1 ## Generates data with the following structure: ## X: characteristics", "0.001 if (vaccine == 2): self.symptom_baseline[7] += 0.01 if (vaccine >= 0): self.symptom_baseline[3]", "pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes)", "long covid sufferers by increasing the chances of various # symptoms slightly if", "this function before anything else is done. Calling this function again generates a", "1 return outcome def treat(self, person_index, treatment): \"\"\" Treat a patient. 
Args: person_index", "== 0 and self.symptoms[1] == 0): self.symptom_baseline = np.array( [0, 0, 0.001, 0.01,", "person_index (int array), indices of person in the population vaccine_array (n*|A| array), array", "0 else: result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return", "[person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] = x_t self.X =", "k] else: if (np.random.uniform() < r[k]): result[i, k] = 0 else: result[i, k]", "= pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01", "pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes +", "a population. Call this function before anything else is done. Calling this function", "= [] for t in range(n_individuals): person = Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate)", "0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() *", "0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline", "t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i += 1 return outcome", "0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i += 1", "and self.symptoms[1] == 0): self.symptom_baseline = np.array( [0, 0, 0.001, 0.01, 0.02, 0.002,", "0): self.symptom_baseline += np.array( [0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04,", "else: self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01,", "0.01 if (vaccine >= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 # model", "def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes 
self.n_comorbidities = 6; self.n_symptoms =", "self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy =", "< self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence,", "= 128 n_vaccines = 3 n_treatments = 4 pop = Population(n_genes, n_vaccines, n_treatments)", "1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self)", "0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate", "# increase symptom probabilities for symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1]", "= np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y =", "= 4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations)", "== 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 #", "self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity =", "sample, purging the previous one from memory. :param int n_individuals: the number of", "future multiple doses may be modelled. 
\"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i =", "np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj = self.age / 100 # age", "self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 # model long covid sufferers by increasing", "self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] = self.HT * self.age_adj", "1): result[t, k] = X[t, k] else: if (np.random.uniform() < r[k]): result[t, k]", "= self.age / 100 # age affects everything self.income = np.random.gamma(1, 10000) self.comorbidities", "## Generates data with the following structure: ## X: characteristics before treatment, including", "= np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline", "+ self.n_genes + self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms])", "= 0.1 self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7]", "4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv',", "k] = 0 else: result[i, k] = self.X[t, k] return result def get_features(self,", "1] print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False) pandas.DataFrame(a).to_csv('treatment_actions.csv',", "[0] * pop.n_vaccines # use vaccine = -1 if no vaccine is given", "np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] = x_t self.X", "np.array( [0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common", "4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= 
pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if", "population # print(treatment) for i in range(N): t = person_index[i] r = np.array(np.matrix(treatment[i])", "x_t self.X = X return X def vaccinate(self, person_index, vaccine_array): \"\"\" Give a", "pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3, 11)", "result[t]) return treatments, result # main if __name__ == \"__main__\": import pandas try:", "= 0 else: result[i, k] = self.X[t, k] return result def get_features(self, person_index):", "genetic factors self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine,", "self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2,", "Stomach, Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste, Fever, Headache, Stomach ## Critical", "person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] = x_t self.X = X return", "import pandas try: import policy except: import project2.src.covid.policy n_symptoms = 10 n_genes =", "self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :]", "print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k", "result def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines])", "each patient Returns: The symptoms of the selected individuals Notes: Currently only one", "Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild 
symptoms:", "symptom_baseline) for s in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1", "treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False) pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False, index=False)", "pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine]", "7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *=", "and self.symptoms[1] == 0): self.symptom_baseline += np.array( [0, 0, 0.06, 0.04, 0.01, 0.04,", "0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten()", "dose is implemented, but in the future multiple doses may be modelled. \"\"\"", "Person: def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age", "Diabetes, Heart diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, #", "0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common side-effects if (vaccine == 1):", "np.zeros([len(person_index), self.n_symptoms]) i = 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] =", "pop.diabetes + self.comorbidities[1] * 0.5 self.HT = pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3]", "result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms):", "/= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3 self.smoking", "0.01 self.symptom_baseline[9] += 0.01 # Initially no symptoms apart from Covid+/CovidPre self.symptoms =", 
"\"\"\"Generate a population. Call this function before anything else is done. Calling this", "0.08 self.obesity = 0.3 self.smoking = 0.2 self.diabetes = 0.1 self.heart = 0.15", "Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste, Fever, Headache,", "* pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender ==", "in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def", "## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as np import pickle class", "if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5 # baseline symptoms of non-covid patients", "population. Call this function before anything else is done. Calling this function again", "self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate = [0.7,", "one vaccine dose is implemented, but in the future multiple doses may be", "(vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and", "print(\"V:\", vaccine, symptom_baseline) for s in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s]", "== 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if (vaccine == 2): self.symptom_baseline[7]", "as np import pickle class Person: def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes)", "self.smoking = 0.2 self.diabetes = 0.1 self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy", "= 1 else: self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05,", "function again generates a completely new population sample, purging the previous one from", "= np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) 
self.persons = [] for t in range(n_individuals): person =", "# use i to index the treated # use t to index the", "self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]]", "outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self)", "- 1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array,", "self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a population def treatment(self, X, policy): treatments", "= self.X[t, k] else: if (np.random.uniform() < r[k]): result[i, k] = 0 else:", "given to each patient Returns: The symptoms of the selected individuals \"\"\" N", "array indicating which treatments are to be given to each patient Returns: The", "* pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking", "Headache, Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as np import", "= np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence;", "generates a completely new population sample, purging the previous one from memory. :param", "## Comborbidities: ## Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension ## Symptom list:", "no vaccine is given def vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >=", "covid sufferers by increasing the chances of various # symptoms slightly if (self.symptoms[0]", "function before anything else is done. 
Calling this function again generates a completely", "self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons = [] for t in range(n_individuals):", "for k in range(self.n_symptoms): if (k <= 1): result[t, k] = X[t, k]", "= [0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75,", "k] = self.X[t, k] else: if (np.random.uniform() < r[k]): result[i, k] = 0", "= [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2]", "= self.HT * self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i]", "self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes + self.comorbidities[1] * 0.5 self.HT =", "1) self.age = np.random.gamma(3, 11) self.age_adj = self.age / 100 # age affects", "indicating which treatments are to be given to each patient Returns: The symptoms", "self.persons[t].vaccines]) return x_t ## Treats a population def treatment(self, X, policy): treatments =", "range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten()", "0): self.symptom_baseline = np.array( [0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002,", "range(self.n_symptoms): if (k <= 1): result[t, k] = X[t, k] else: if (np.random.uniform()", "100 # age affects everything self.income = np.random.gamma(1, 10000) self.comorbidities = [0] *", "pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s in range(2, pop.n_symptoms): if", "np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity", "a patient. 
Args: person_index (int array), indices of persons in the population to", "128 n_vaccines = 3 n_treatments = 4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations", "vaccine, symptom_baseline) for s in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] =", "index the original population # print(treatment) for i in range(N): t = person_index[i]", "increase symptom probabilities for symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] =", "self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02,", "self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj", "[0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1,", "self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3 self.smoking = 0.2", "self.symptom_baseline += np.array( [0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]);", "self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] +=", "np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines =", "3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine]", "memory. 
:param int n_individuals: the number of individuals to generate \"\"\" self.n_individuals =", "self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if (vaccine == 2): self.symptom_baseline[7] += 0.01", "i += 1 return outcome def treat(self, person_index, treatment): \"\"\" Treat a patient.", "of persons in the population to treat treatment_array (n*|A| array), array indicating which", "(n*|A| array), array indicating which treatments are to be given to each patient", "vaccine dose is implemented, but in the future multiple doses may be modelled.", "0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines # use", "self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G)", "only one vaccine dose is implemented, but in the future multiple doses may", "0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines # use vaccine", "self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj = self.age / 100", "0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) *", "is given def vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >= 0): vaccinated", "Fever, Headache, Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as np", "* self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1", "True else: vaccinated = False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array", "symptoms: Taste, Fever, Headache, Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy", "non-covid patients if (self.symptoms[0] == 0 and self.symptoms[1] == 0): self.symptom_baseline = np.array(", "= np.array( [0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ##", "0.01]); # genetic factors self.symptom_baseline = 
np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline #", "the number of individuals to generate \"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals,", "np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj = self.age", "k in range(self.n_symptoms): if (k <= 1): result[t, k] = X[t, k] else:", "# age affects everything self.income = np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities", "(vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms,", "def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age =", "symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array(", "+= 0.01 # Initially no symptoms apart from Covid+/CovidPre self.symptoms = [0] *", "k in range(self.n_symptoms): if (k <= 1): result[i, k] = self.X[t, k] else:", "i to index the treated # use t to index the original population", "self.n_symptoms]) self.persons = [] for t in range(n_individuals): person = Person(self) vaccine =", "1 else: self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05,", "self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes =", "print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False) pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False,", "in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i += 1 return outcome def", "np.zeros([n_individuals, 3 + self.n_genes + 
self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals,", "vaccinated = False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *=", "2): self.symptom_baseline[7] += 0.01 if (vaccine >= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] +=", "Blood-Clots import numpy as np import pickle class Person: def __init__(self, pop): self.genes", "0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy =", "treatment_array (n*|A| array), array indicating which treatments are to be given to each", "* self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s in range(2, pop.n_symptoms): if (np.random.uniform()", "person in the population vaccine_array (n*|A| array), array indicating which vaccines are to", "\"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i],", "from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] =", "\"\"\" Treat a patient. 
Args: person_index (int array), indices of persons in the", "else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 # Initially no symptoms apart from", "treated # use t to index the original population # print(treatment) for i", "result # main if __name__ == \"__main__\": import pandas try: import policy except:", "0.02, 0.05, 0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines # use vaccine =", "(sum(vaccine_array) >= 0): vaccinated = True else: vaccinated = False if (vaccinated): vaccine", "of individuals to generate \"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals, 3 +", "which treatments are to be given to each patient Returns: The symptoms of", "X return X def vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine to a", "1 # increase symptom probabilities for symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]):", "# use vaccine = -1 if no vaccine is given def vaccinate(self, vaccine_array,", "0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 # model long covid sufferers by", "= np.zeros([N, self.n_symptoms]) # use i to index the treated # use t", "0.5 # baseline symptoms of non-covid patients if (self.symptoms[0] == 0 and self.symptoms[1]", "[person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] = x_t self.X = X", ">= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age,", "n_vaccines, n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated =", "characteristics before treatment, including whether or not they # were vaccinated ## The", "* self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes + self.comorbidities[1] * 0.5 self.HT", "*= pop.baseline_efficacy[vaccine] if (vaccinated 
and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]] *=", "s in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class Population:", "one from memory. :param int n_individuals: the number of individuals to generate \"\"\"", "(self.symptoms[0] == 1): self.symptom_baseline *= 0.5 # baseline symptoms of non-covid patients if", "multiple doses may be modelled. \"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0", "Treats a population def treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result =", "= 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms): if (k", "(self.gender == 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01", "Returns: The symptoms of the selected individuals \"\"\" N = len(person_index) result =", "= np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a", "= False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine]", "individuals \"\"\" N = len(person_index) result = np.zeros([N, self.n_symptoms]) # use i to", "n_genes, n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines", "pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines = [0] *", "a completely new population sample, purging the previous one from memory. 
:param int", "self.diab = pop.diabetes + self.comorbidities[1] * 0.5 self.HT = pop.htension + self.comorbidities[2] *", "8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5", "np.zeros([N, self.n_symptoms]) # use i to index the treated # use t to", "0.1 self.historical_prevalence = 0.1 ## Generates data with the following structure: ## X:", "Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine >=", "given to each patient Returns: The symptoms of the selected individuals Notes: Currently", "pop.smoking self.diab = pop.diabetes + self.comorbidities[1] * 0.5 self.HT = pop.htension + self.comorbidities[2]", "[] for t in range(n_individuals): person = Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) -", "< r[k]): result[i, k] = 0 else: result[i, k] = self.X[t, k] return", "= 0 else: result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t])", "vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1]", "n_treatments): self.n_genes = n_genes self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines = n_vaccines", "print(treatment) for i in range(N): t = person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten()", "person. 
Args: person_index (int array), indices of person in the population vaccine_array (n*|A|", "np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s in", "(k <= 1): result[t, k] = X[t, k] else: if (np.random.uniform() < r[k]):", "0.1, 0.1] self.prevalence = 0.1 self.historical_prevalence = 0.1 ## Generates data with the", "for symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline =", "= [0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95,", "x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats", "i in range(N): t = person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k", "array), array indicating which treatments are to be given to each patient Returns:", "= [0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence = 0.1", "= n_vaccines self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A", "0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 # Initially no symptoms apart", "11) self.age_adj = self.age / 100 # age affects everything self.income = np.random.gamma(1,", "result[t, k] = X[t, k] else: if (np.random.uniform() < r[k]): result[t, k] =", "= n_individuals X = np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities + self.n_vaccines +", "treatments, result # main if __name__ == \"__main__\": import pandas try: import policy", "[0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common side-effects", "= 0.1 self.historical_prevalence = 0.1 ## Generates data with the following structure: ##", "Death ## Mild symptoms: Taste, Fever, Headache, Stomach ## Critical 
symptoms: Pneumonia, Myocarditis,", "Heart diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia,", "= Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine", "return result def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities,", "else: if (np.random.uniform() < r[k]): result[i, k] = 0 else: result[i, k] =", "np.array( [0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); # genetic", "0.1 ## Generates data with the following structure: ## X: characteristics before treatment,", "(vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7,", "(self.symptoms[0] == 1 and self.symptoms[1] == 0): self.symptom_baseline += np.array( [0, 0, 0.06,", "self.prevalence = 0.1 self.historical_prevalence = 0.1 ## Generates data with the following structure:", "the population to treat treatment_array (n*|A| array), array indicating which treatments are to", "* self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[t, k] =", "symptoms of the selected individuals Notes: Currently only one vaccine dose is implemented,", "self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): # print (\"X:\", result[t])", "*= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] ==", "[0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); # genetic factors", "self.persons = [] for t in range(n_individuals): person = Person(self) vaccine = np.random.choice(4,", "outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) 
pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False) pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False, index=False) pandas.DataFrame(y).to_csv('treatment_outcomes.csv',", "treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t", "= vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2, 3,", "Common side-effects if (vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if", "self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02,", "to each patient Returns: The symptoms of the selected individuals \"\"\" N =", "[0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9]", "in the population vaccine_array (n*|A| array), array indicating which vaccines are to be", "self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a population def treatment(self, X,", "+= 1 return outcome def treat(self, person_index, treatment): \"\"\" Treat a patient. 
Args:", "0.1 # model long covid sufferers by increasing the chances of various #", "== 1 and self.symptoms[1] == 0): self.symptom_baseline += np.array( [0, 0, 0.06, 0.04,", "a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False) pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False, index=False) pandas.DataFrame(y).to_csv('treatment_outcomes.csv', header=False,", "vaccine is given def vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >= 0):", "## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots,", "# baseline symptoms of non-covid patients if (self.symptoms[0] == 0 and self.symptoms[1] ==", "np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if", ">= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 # model long covid sufferers", "0.15 self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7,", "n_treatments = 4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation =", "pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\") a, y =", "in the population to treat treatment_array (n*|A| array), array indicating which treatments are", "range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def __init__(self,", "np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3 self.smoking = 0.2 self.diabetes =", "result[t, k] = 0 else: result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] ,", "## Mild symptoms: Taste, Fever, Headache, Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots", 
"population may already be vaccinated. def generate(self, n_individuals): \"\"\"Generate a population. Call this", "0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline =", "+= 0.01 self.symptom_baseline[9] += 0.01 # Initially no symptoms apart from Covid+/CovidPre self.symptoms", "(vaccine == 2): self.symptom_baseline[7] += 0.01 if (vaccine >= 0): self.symptom_baseline[3] += 0.2", "modelled. \"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0 for t in person_index:", "a population def treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0],", "purging the previous one from memory. :param int n_individuals: the number of individuals", "and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]]", "= Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False)", "= pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes", "various # symptoms slightly if (self.symptoms[0] == 1 and self.symptoms[1] == 0): self.symptom_baseline", "policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]):", "if (np.random.uniform() < r[k]): result[t, k] = 0 else: result[t, k] = X[t,", "data with the following structure: ## X: characteristics before treatment, including whether or", "k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result #", "population sample, purging the previous one from memory. 
:param int n_individuals: the number", "= np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] ==", "self.asthma = 0.08 self.obesity = 0.3 self.smoking = 0.2 self.diabetes = 0.1 self.heart", "side-effects if (vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if (vaccine", "1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities,", "selected individuals \"\"\" N = len(person_index) result = np.zeros([N, self.n_symptoms]) # use i", "in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) *", "-1 if no vaccine is given def vaccinate(self, vaccine_array, pop): ## Vaccinated if", "X[t, :] = x_t self.X = X return X def vaccinate(self, person_index, vaccine_array):", "self.persons[i].symptoms i += 1 return outcome def treat(self, person_index, treatment): \"\"\" Treat a", "if (self.symptoms[0] == 0 and self.symptoms[1] == 0): self.symptom_baseline = np.array( [0, 0,", "= np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] =", "index=False) n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1]", "= person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms): if (k", "the chances of various # symptoms slightly if (self.symptoms[0] == 1 and self.symptoms[1]", "return X def vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine to a specific", "def generate(self, n_individuals): \"\"\"Generate a population. 
Call this function before anything else is", "X def vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine to a specific person.", "X[t, k] else: if (np.random.uniform() < r[k]): result[t, k] = 0 else: result[t,", "= 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment =", "array), indices of persons in the population to treat treatment_array (n*|A| array), array", "self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence = 0.1 self.historical_prevalence = 0.1 ##", "= x_t self.X = X return X def vaccinate(self, person_index, vaccine_array): \"\"\" Give", "t to index the original population # print(treatment) for i in range(N): t", "in range(N): t = person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k in", "0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence = 0.1 self.historical_prevalence =", "= np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3 self.smoking = 0.2 self.diabetes", "self.X = X return X def vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine", "0.04, 0.004, 0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline = np.array( np.matrix(self.genes) *", "+= 0.1 # model long covid sufferers by increasing the chances of various", "= X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result # main", "if (self.symptoms[0] == 1 and self.symptoms[1] == 0): self.symptom_baseline += np.array( [0, 0,", "to treat treatment_array (n*|A| array), array indicating which treatments are to be given", "## Treats a population def treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result", "number of individuals to generate \"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals, 3", "in range(n_individuals): person = Person(self) vaccine = 
np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array =", "to be given to each patient Returns: The symptoms of the selected individuals", "self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s in range(2, pop.n_symptoms): if (np.random.uniform() <", "1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2,", "Args: person_index (int array), indices of person in the population vaccine_array (n*|A| array),", "pickle class Person: def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2,", "self.symptoms[1] == 0): self.symptom_baseline += np.array( [0, 0, 0.06, 0.04, 0.01, 0.04, 0.004,", "x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] =", "__init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3,", "symptoms of non-covid patients if (self.symptoms[0] == 0 and self.symptoms[1] == 0): self.symptom_baseline", "= 0.1 ## Generates data with the following structure: ## X: characteristics before", "of the selected individuals Notes: Currently only one vaccine dose is implemented, but", "## X: characteristics before treatment, including whether or not they # were vaccinated", "= pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] += 0.01 else:", "population vaccine_array (n*|A| array), array indicating which vaccines are to be given to", "# Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste, Fever, Headache, Stomach", "= 0.2 self.diabetes = 0.1 self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy =", "self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a population 
def treatment(self,", "0.05, 0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines # use vaccine = -1", "== 0): self.symptom_baseline = np.array( [0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001,", "= pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment =", "if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3,", "= 0.3 self.smoking = 0.2 self.diabetes = 0.1 self.heart = 0.15 self.htension =", "result[i, k] = 0 else: result[i, k] = self.X[t, k] return result def", "self.diabetes = 0.1 self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6,", "0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy =", "class Person: def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1)", "self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence =", "vaccine = np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine >= 0):", "self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy = [0.9,", "self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[i, k] = self.X[t,", "== 0): self.symptom_baseline += np.array( [0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01,", "individuals to generate \"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals, 3 + self.n_genes", "array indicating which vaccines are to be given to each patient Returns: The", "# print(\"V:\", vaccine, symptom_baseline) for s in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]):", "6; self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G = 
np.random.uniform(size=[self.n_genes,", "self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a population def treatment(self, X, policy):", "* pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s in range(2, pop.n_symptoms):", "is done. Calling this function again generates a completely new population sample, purging", "[self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a population def", "np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender", "n_individuals): \"\"\"Generate a population. Call this function before anything else is done. Calling", "for i in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i]", "policy except: import project2.src.covid.policy n_symptoms = 10 n_genes = 128 n_vaccines = 3", "np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1", "0): vaccinated = True else: vaccinated = False if (vaccinated): vaccine = np.argmax(vaccine_array)", "n_symptoms = 10 n_genes = 128 n_vaccines = 3 n_treatments = 4 pop", "np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] == 1):", "Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste, Fever, Headache, Stomach ##", "= n_genes self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments =", "Blood-Clots, Death ## Mild symptoms: Taste, Fever, Headache, Stomach ## Critical symptoms: Pneumonia,", "= 10 n_genes = 128 n_vaccines = 3 n_treatments = 4 pop =", "else: if 
(np.random.uniform() < r[k]): result[t, k] = 0 else: result[t, k] =", "self.symptoms[0] = 1 # increase symptom probabilities for symptoms when covid+ if (np.random.uniform()", "0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines #", "for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i += 1 return", "n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating", "everything self.income = np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma", "covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence,", "self.age_adj self.comorbidities[5] = self.HT * self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform() <", "+ self.comorbidities[1] * 0.5 self.HT = pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] =", "* pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom probabilities", "= 6; self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G =", "np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] =", "= self.diab self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] = self.HT * self.age_adj for", "if (vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5,", "0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline", "def vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine to a specific person. 
Args:", "= self.persons[i].symptoms i += 1 return outcome def treat(self, person_index, treatment): \"\"\" Treat", "if (k <= 1): result[i, k] = self.X[t, k] else: if (np.random.uniform() <", "implemented, but in the future multiple doses may be modelled. \"\"\" outcome =", "0 else: result[i, k] = self.X[t, k] return result def get_features(self, person_index): x_t", "age affects everything self.income = np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0]", "self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines]) X[t,", "treat(self, person_index, treatment): \"\"\" Treat a patient. Args: person_index (int array), indices of", "= 3 n_treatments = 4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations = 1000", "model long covid sufferers by increasing the chances of various # symptoms slightly", "outcome[i] = self.persons[i].symptoms i += 1 return outcome def treat(self, person_index, treatment): \"\"\"", "treat treatment_array (n*|A| array), array indicating which treatments are to be given to", "person.income], person.genes, person.comorbidities, person.vaccines]) X[t, :] = x_t self.X = X return X", "len(person_index) result = np.zeros([N, self.n_symptoms]) # use i to index the treated #", "return treatments, result # main if __name__ == \"__main__\": import pandas try: import", "header=False, index=False) n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] ==", "0.1]); self.vaccines = [0] * pop.n_vaccines # use vaccine = -1 if no", "when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence,", "treatment, including whether or not they # were vaccinated ## The generated population", "+ self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, 
self.n_symptoms]) self.persons = [] for t in", "self.symptoms[1] == 0): self.symptom_baseline = np.array( [0, 0, 0.001, 0.01, 0.02, 0.002, 0.005,", "< self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes", "= np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj = self.age / 100 #", "pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def __init__(self, n_genes,", "whether or not they # were vaccinated ## The generated population may already", "outcome def treat(self, person_index, treatment): \"\"\" Treat a patient. Args: person_index (int array),", "def treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for", "0.2 self.symptom_baseline[4] += 0.1 # model long covid sufferers by increasing the chances", "symptom probabilities for symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1", ", \"Y:\", result[t]) return treatments, result # main if __name__ == \"__main__\": import", "# genetic factors self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\",", "self.genes = np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj", "result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result", "== \"__main__\": import pandas try: import policy except: import project2.src.covid.policy n_symptoms = 10", "\"\"\" N = len(person_index) result = np.zeros([N, self.n_symptoms]) # use i to index", "Initially no symptoms apart from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if (np.random.uniform()", "symptoms apart from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]):", 
"return outcome def treat(self, person_index, treatment): \"\"\" Treat a patient. Args: person_index (int", "symptoms of the selected individuals \"\"\" N = len(person_index) result = np.zeros([N, self.n_symptoms])", "y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False) pandas.DataFrame(a).to_csv('treatment_actions.csv', header=False, index=False) pandas.DataFrame(y).to_csv('treatment_outcomes.csv', header=False, index=False)", "<= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05,", "np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t =", "new population sample, purging the previous one from memory. :param int n_individuals: the", "if (sum(vaccine_array) >= 0): vaccinated = True else: vaccinated = False if (vaccinated):", "X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\") a,", "import pickle class Person: def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender =", "1 class Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities =", "by increasing the chances of various # symptoms slightly if (self.symptoms[0] == 1", "= pop.smoking self.diab = pop.diabetes + self.comorbidities[1] * 0.5 self.HT = pop.htension +", "self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom probabilities for symptoms when covid+ if", "10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /=", "= 0.08 self.obesity = 0.3 self.smoking = 0.2 self.diabetes = 0.1 self.heart =", "= np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = 
np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine]", "self.comorbidities[2] = pop.smoking self.diab = pop.diabetes + self.comorbidities[1] * 0.5 self.HT = pop.htension", ":] = x_t self.X = X return X def vaccinate(self, person_index, vaccine_array): \"\"\"", "self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = self.persons[i].symptoms i += 1 return outcome def treat(self, person_index,", "each patient Returns: The symptoms of the selected individuals \"\"\" N = len(person_index)", "r[k]): result[i, k] = 0 else: result[i, k] = self.X[t, k] return result", "0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001,", "for i in range(N): t = person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for", "== 1): self.symptom_baseline *= 0.5 # baseline symptoms of non-covid patients if (self.symptoms[0]", "0.2 self.diabetes = 0.1 self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy = [0.5,", "for t in range(n_individuals): person = Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) - 1", "done. 
Calling this function again generates a completely new population sample, purging the", "= np.random.choice(2, size=pop.n_genes) self.gender = np.random.choice(2, 1) self.age = np.random.gamma(3, 11) self.age_adj =", "array), array indicating which vaccines are to be given to each patient Returns:", "k] = X[t, k] else: if (np.random.uniform() < r[k]): result[t, k] = 0", "Returns: The symptoms of the selected individuals Notes: Currently only one vaccine dose", "n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment", "self.symptoms = [0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 #", "which vaccines are to be given to each patient Returns: The symptoms of", "self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline", "person.vaccines]) X[t, :] = x_t self.X = X return X def vaccinate(self, person_index,", "self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] = self.HT * self.age_adj for i in", "*= 0.5 # baseline symptoms of non-covid patients if (self.symptoms[0] == 0 and", "0.1] self.prevalence = 0.1 self.historical_prevalence = 0.1 ## Generates data with the following", "population def treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms])", "def vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >= 0): vaccinated = True", "0.001, 0.002, 0.0001]) ## Common side-effects if (vaccine == 1): self.symptom_baseline[8] += 0.01", "< r[k]): result[t, k] = 0 else: result[t, k] = X[t, k] ##print(\"X:\",", "0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common side-effects if (vaccine == 1): self.symptom_baseline[8]", "Calling this function again generates a completely new 
population sample, purging the previous", "baseline symptoms of non-covid patients if (self.symptoms[0] == 0 and self.symptoms[1] == 0):", "Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated", "(vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9] += 0.001 if (vaccine == 2):", "completely new population sample, purging the previous one from memory. :param int n_individuals:", "vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income],", "__init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities = 6; self.n_symptoms = 10", "<= 1): result[i, k] = self.X[t, k] else: if (np.random.uniform() < r[k]): result[i,", "n_individuals X = np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities + self.n_vaccines + self.n_symptoms])", "# were vaccinated ## The generated population may already be vaccinated. 
def generate(self,", "\"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities +", "self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5 # baseline symptoms", "n_vaccines self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A =", "of non-covid patients if (self.symptoms[0] == 0 and self.symptoms[1] == 0): self.symptom_baseline =", "range(n_individuals): person = Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array = np.zeros(self.n_vaccines)", "self.X[t, k] else: if (np.random.uniform() < r[k]): result[i, k] = 0 else: result[i,", "+= 0.2 self.symptom_baseline[4] += 0.1 # model long covid sufferers by increasing the", "= np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08", "the selected individuals Notes: Currently only one vaccine dose is implemented, but in", "X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result # main if __name__ == \"__main__\":", "= 1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment", "0.8] self.critical_efficacy = [0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate =", "x_t ## Treats a population def treatment(self, X, policy): treatments = np.zeros([X.shape[0], self.n_treatments])", "self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]);", "self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *=", "t in range(n_individuals): person = Person(self) vaccine = np.random.choice(4, p=self.vaccination_rate) - 1 vaccine_array", "k] return result def 
get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes,", "sufferers by increasing the chances of various # symptoms slightly if (self.symptoms[0] ==", "np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons = [] for t in range(n_individuals): person = Person(self)", "self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms])", "0.04, 0.01]); # genetic factors self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline", "self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3 self.smoking = 0.2 self.diabetes = 0.1", "result = np.zeros([N, self.n_symptoms]) # use i to index the treated # use", "the previous one from memory. :param int n_individuals: the number of individuals to", ":param int n_individuals: the number of individuals to generate \"\"\" self.n_individuals = n_individuals", "symptoms slightly if (self.symptoms[0] == 1 and self.symptoms[1] == 0): self.symptom_baseline += np.array(", "from memory. 
:param int n_individuals: the number of individuals to generate \"\"\" self.n_individuals", "Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1", "vaccine_array = np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person)", "np import pickle class Person: def __init__(self, pop): self.genes = np.random.choice(2, size=pop.n_genes) self.gender", "3 n_treatments = 4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations = 1000 X_observation", "Smoking, Diabetes, Heart diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache,", "self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 # Initially no symptoms apart from Covid+/CovidPre", "if (vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate(", "pop): ## Vaccinated if (sum(vaccine_array) >= 0): vaccinated = True else: vaccinated =", "be vaccinated. def generate(self, n_individuals): \"\"\"Generate a population. 
Call this function before anything", "= len(person_index) result = np.zeros([N, self.n_symptoms]) # use i to index the treated", "= np.zeros([len(person_index), self.n_symptoms]) i = 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i]", ">= 0): vaccinated = True else: vaccinated = False if (vaccinated): vaccine =", "pop.n_vaccines # use vaccine = -1 if no vaccine is given def vaccinate(self,", "try: import policy except: import project2.src.covid.policy n_symptoms = 10 n_genes = 128 n_vaccines", "0.01 self.symptom_baseline[9] += 0.001 if (vaccine == 2): self.symptom_baseline[7] += 0.01 if (vaccine", "person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes, person.comorbidities, person.vaccines])", "pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9]", "= 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender, person.income], person.genes,", "result[i, k] = self.X[t, k] return result def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms,", "== 1] print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False, index=False)", "*= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5 #", "to index the original population # print(treatment) for i in range(N): t =", "treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms): if", "Vaccinated if (sum(vaccine_array) >= 0): vaccinated = True else: vaccinated = False if", "0.5 self.comorbidities[3] = self.diab 
self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] = self.HT *", "vaccine_array): \"\"\" Give a vaccine to a specific person. Args: person_index (int array),", "np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): # print (\"X:\",", "= [0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase", "self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *=", "self) outcome[i] = self.persons[i].symptoms i += 1 return outcome def treat(self, person_index, treatment):", "X, policy): treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t in", "t in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t])", "self.symptom_baseline[4] += 0.1 # model long covid sufferers by increasing the chances of", "vaccinated. def generate(self, n_individuals): \"\"\"Generate a population. 
Call this function before anything else", "self.n_genes = n_genes self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments", "self.income = np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1]", "= np.zeros(self.n_vaccines) if (vaccine >= 0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t", "be given to each patient Returns: The symptoms of the selected individuals \"\"\"", "main if __name__ == \"__main__\": import pandas try: import policy except: import project2.src.covid.policy", "0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common side-effects if", "X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment,", "self.symptom_baseline[7] += 0.01 if (vaccine >= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1", "= 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy", "or not they # were vaccinated ## The generated population may already be", "X = np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y", "(int array), indices of person in the population vaccine_array (n*|A| array), array indicating", "# print(treatment) for i in range(N): t = person_index[i] r = np.array(np.matrix(treatment[i]) *", "self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[t, k] = X[t,", "indices of persons in the population to treat treatment_array (n*|A| array), array indicating", "including whether or not they # were vaccinated ## The generated population may", "get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t", 
"person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms): if (k <=", "pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:,", "self.symptom_baseline[9] += 0.01 # Initially no symptoms apart from Covid+/CovidPre self.symptoms = [0]", "0 and self.symptoms[1] == 0): self.symptom_baseline = np.array( [0, 0, 0.001, 0.01, 0.02,", "1 and self.symptoms[1] == 0): self.symptom_baseline += np.array( [0, 0, 0.06, 0.04, 0.01,", "they # were vaccinated ## The generated population may already be vaccinated. def", "in the future multiple doses may be modelled. \"\"\" outcome = np.zeros([len(person_index), self.n_symptoms])", "vaccines are to be given to each patient Returns: The symptoms of the", "= n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms])", "except: import project2.src.covid.policy n_symptoms = 10 n_genes = 128 n_vaccines = 3 n_treatments", "self.age / 100 # age affects everything self.income = np.random.gamma(1, 10000) self.comorbidities =", "# symptoms slightly if (self.symptoms[0] == 1 and self.symptoms[1] == 0): self.symptom_baseline +=", "to index the treated # use t to index the original population #", "= pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\") a, y", "previous one from memory. 
:param int n_individuals: the number of individuals to generate", "person.comorbidities, person.vaccines]) X[t, :] = x_t self.X = X return X def vaccinate(self,", "of person in the population vaccine_array (n*|A| array), array indicating which vaccines are", "self.comorbidities[1] * 0.5 self.HT = pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab", "False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if", "result[i, k] = self.X[t, k] else: if (np.random.uniform() < r[k]): result[i, k] =", "= [0.8, 0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1,", "vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2, 3, 4,", "original population # print(treatment) for i in range(N): t = person_index[i] r =", "r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms): if (k <= 1):", "self.n_symptoms]) for t in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r", "self.vaccines = [0] * pop.n_vaccines # use vaccine = -1 if no vaccine", "+= np.array( [0, 0, 0.06, 0.04, 0.01, 0.04, 0.004, 0.01, 0.04, 0.01]); #", "the following structure: ## X: characteristics before treatment, including whether or not they", "increasing the chances of various # symptoms slightly if (self.symptoms[0] == 1 and", "pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5 # baseline", "## Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive,", "self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence,", "else is done. 
Calling this function again generates a completely new population sample,", "a specific person. Args: person_index (int array), indices of person in the population", "the original population # print(treatment) for i in range(N): t = person_index[i] r", "(k <= 1): result[i, k] = self.X[t, k] else: if (np.random.uniform() < r[k]):", "diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach,", "r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms): if (k <= 1):", "(np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def __init__(self, n_genes, n_vaccines, n_treatments):", "n_vaccines = 3 n_treatments = 4 pop = Population(n_genes, n_vaccines, n_treatments) n_observations =", "self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] =", "0): vaccine_array[vaccine] = 1 person.vaccinate(vaccine_array, self) self.persons.append(person) x_t = np.concatenate( [person.symptoms, [person.age, person.gender,", "if (k <= 1): result[t, k] = X[t, k] else: if (np.random.uniform() <", "__name__ == \"__main__\": import pandas try: import policy except: import project2.src.covid.policy n_symptoms =", "in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] = 0", "= 1 class Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities", "10 n_genes = 128 n_vaccines = 3 n_treatments = 4 pop = Population(n_genes,", "n_treatments) n_observations = 1000 X_observation = pop.generate(n_observations) pandas.DataFrame(X_observation).to_csv('observation_features.csv', header=False, index=False) n_treated = 1000", "again generates a completely new population sample, purging the previous one from memory.", "slightly if (self.symptoms[0] == 1 and self.symptoms[1] == 0): 
self.symptom_baseline += np.array( [0,", "vaccine to a specific person. Args: person_index (int array), indices of person in", "self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 # Initially no", "0.004, 0.01, 0.04, 0.01]); # genetic factors self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten()", "use i to index the treated # use t to index the original", "self.n_treatments, self.n_symptoms]) self.persons = [] for t in range(n_individuals): person = Person(self) vaccine", "0.002, 0.0001]) ## Common side-effects if (vaccine == 1): self.symptom_baseline[8] += 0.01 self.symptom_baseline[9]", "+ self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons =", "self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons = [] for t", "if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom probabilities for symptoms", "patient. 
Args: person_index (int array), indices of persons in the population to treat", "self.symptoms[s] = 1 class Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes", "population to treat treatment_array (n*|A| array), array indicating which treatments are to be", "* self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1): self.symptom_baseline[8]", "Comborbidities: ## Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension ## Symptom list: Covid-Recovered,", "0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001]) ## Common side-effects if (vaccine ==", "(int array), indices of persons in the population to treat treatment_array (n*|A| array),", "else: result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments,", "\"__main__\": import pandas try: import policy except: import project2.src.covid.policy n_symptoms = 10 n_genes", "0.5 self.HT = pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] =", "+= 0.01 if (vaccine >= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 #", "range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else: self.comorbidities[i] = 0 self.symptom_baseline", "structure: ## X: characteristics before treatment, including whether or not they # were", "indices of person in the population vaccine_array (n*|A| array), array indicating which vaccines", "self.symptom_baseline = np.array( [0, 0, 0.001, 0.01, 0.02, 0.002, 0.005, 0.001, 0.002, 0.0001])", "self.n_symptoms]) # use i to index the treated # use t to index", "Population: def __init__(self, n_genes, n_vaccines, n_treatments): self.n_genes = n_genes self.n_comorbidities = 6; self.n_symptoms", "= np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] 
=", "= np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): # print", "self.n_individuals = n_individuals X = np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities + self.n_vaccines", "+= 0.01 self.symptom_baseline[9] += 0.001 if (vaccine == 2): self.symptom_baseline[7] += 0.01 if", "* 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] = self.HT", "= np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[i,", "vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >= 0): vaccinated = True else:", "self.diab self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5] = self.HT * self.age_adj for i", "== 2): self.symptom_baseline[7] += 0.01 if (vaccine >= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4]", "np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[t, k]", "= pop.heart * self.age_adj self.comorbidities[5] = self.HT * self.age_adj for i in range(pop.n_comorbidities):", "selected individuals Notes: Currently only one vaccine dose is implemented, but in the", "\"Y:\", result[t]) return treatments, result # main if __name__ == \"__main__\": import pandas", "self.HT * self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] =", "Obesity, Smoking, Diabetes, Heart diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever,", "(vaccine >= 0): self.symptom_baseline[3] += 0.2 self.symptom_baseline[4] += 0.1 # model long covid", "sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3 self.smoking =", "if (vaccine == 2): self.symptom_baseline[7] += 0.01 if (vaccine >= 0): self.symptom_baseline[3] +=", "[0.7, 0.1, 0.1, 0.1] self.prevalence = 
0.1 self.historical_prevalence = 0.1 ## Generates data", "= pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes + self.comorbidities[1] *", "pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab = pop.diabetes + self.comorbidities[1] * 0.5", "pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart * self.age_adj", "## Vaccinated if (sum(vaccine_array) >= 0): vaccinated = True else: vaccinated = False", "0.2, 0.1]); self.vaccines = [0] * pop.n_vaccines # use vaccine = -1 if", "+ self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart * self.age_adj self.comorbidities[5]", "vaccine_array (n*|A| array), array indicating which vaccines are to be given to each", "## Comborbidities: ## Comborbidities: ## Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension ##", "for t in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r =", "self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments self.G", "may already be vaccinated. def generate(self, n_individuals): \"\"\"Generate a population. 
Call this function", "vaccinated = True else: vaccinated = False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines", "[0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1] self.prevalence = 0.1 self.historical_prevalence", "10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity *", "= 1 # increase symptom probabilities for symptoms when covid+ if (np.random.uniform() <=", "probabilities for symptoms when covid+ if (np.random.uniform() <= self.symptom_baseline[1]): self.symptoms[1] = 1 self.symptom_baseline", "<= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom probabilities for symptoms when covid+", "Generates data with the following structure: ## X: characteristics before treatment, including whether", "with the following structure: ## X: characteristics before treatment, including whether or not", "use t to index the original population # print(treatment) for i in range(N):", "self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for", "0.75, 0.85] self.death_efficacy = [0.9, 0.95, 0.9] self.vaccination_rate = [0.7, 0.1, 0.1, 0.1]", "self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.3, 0.2, 0.05, 0.2, 0.02, 0.05, 0.2, 0.1]);", "k] = self.X[t, k] return result def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age,", "self.comorbidities[i] = 1 else: self.comorbidities[i] = 0 self.symptom_baseline = np.array( [pop.historical_prevalence, pop.prevalence, 0.01,", "n_treatments self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma", "+= 0.001 if (vaccine == 2): self.symptom_baseline[7] += 0.01 if (vaccine >= 0):", "before anything else is done. 
Calling this function again generates a completely new", "np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ## Treats a population", "0.005, 0.001, 0.002, 0.0001]) ## Common side-effects if (vaccine == 1): self.symptom_baseline[8] +=", "self.X[t, k] return result def get_features(self, person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income],", "generate \"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals, 3 + self.n_genes + self.n_comorbidities", "import project2.src.covid.policy n_symptoms = 10 n_genes = 128 n_vaccines = 3 n_treatments =", "person_index, treatment): \"\"\" Treat a patient. Args: person_index (int array), indices of persons", "pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5 # baseline symptoms of non-covid", "N = len(person_index) result = np.zeros([N, self.n_symptoms]) # use i to index the", "Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis,", "self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] += 0.01", "np.random.gamma(3, 11) self.age_adj = self.age / 100 # age affects everything self.income =", "vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >= 0): vaccinated = True else: vaccinated", "0.1 self.heart = 0.15 self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy", "self.obesity = 0.3 self.smoking = 0.2 self.diabetes = 0.1 self.heart = 0.15 self.htension", "= 0.15 self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy = [0.6,", "= pop.htension + self.comorbidities[2] * 0.5 self.comorbidities[3] = self.diab self.comorbidities[4] = pop.heart 
*", "vaccinate(self, person_index, vaccine_array): \"\"\" Give a vaccine to a specific person. Args: person_index", "doses may be modelled. \"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i = 0 for", "= np.array(np.matrix(treatments[t]) * self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[t,", "are to be given to each patient Returns: The symptoms of the selected", "1] == 1] print(\"Generating treatment outcomes\") a, y = pop.treatment(X_treatment, policy.RandomPolicy(n_treatments)) pandas.DataFrame(X_treatment).to_csv('treatment_features.csv', header=False,", "Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as np import pickle", "pop.historical_prevalence; self.symptom_baseline[1] = pop.prevalence; if (self.gender == 1): self.symptom_baseline[8] += 0.01 else: self.symptom_baseline[7]", "= -1 if no vaccine is given def vaccinate(self, vaccine_array, pop): ## Vaccinated", "given def vaccinate(self, vaccine_array, pop): ## Vaccinated if (sum(vaccine_array) >= 0): vaccinated =", "the treated # use t to index the original population # print(treatment) for", "in range(self.n_symptoms): if (k <= 1): result[i, k] = self.X[t, k] else: if", "of various # symptoms slightly if (self.symptoms[0] == 1 and self.symptoms[1] == 0):", "pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9] *= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1):", "self.vaccines = vaccine_array self.symptom_baseline[1] *= pop.baseline_efficacy[vaccine] if (vaccinated and self.symptoms[1] == 1): self.symptom_baseline[[2,", "+= 0.01 else: self.symptom_baseline[7] += 0.01 self.symptom_baseline[9] += 0.01 # Initially no symptoms", "affects everything self.income = np.random.gamma(1, 10000) self.comorbidities = [0] * pop.n_comorbidities self.comorbidities[0] =", "i = 0 for t in person_index: self.persons[t].vaccinate(vaccine_array[i], self) outcome[i] = 
self.persons[i].symptoms i", "person_index): x_t = np.concatenate([self.persons[t].symptoms, [self.persons[t].age, self.persons[t].gender, self.persons[t].income], self.persons[t].genes, self.persons[t].comorbidities, self.persons[t].vaccines]) return x_t ##", "to generate \"\"\" self.n_individuals = n_individuals X = np.zeros([n_individuals, 3 + self.n_genes +", "+ self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons = [] for", "= X[t, k] else: if (np.random.uniform() < r[k]): result[t, k] = 0 else:", "self.age_adj for i in range(pop.n_comorbidities): if (np.random.uniform() < self.comorbidities[i]): self.comorbidities[i] = 1 else:", "generated population may already be vaccinated. def generate(self, n_individuals): \"\"\"Generate a population. Call", "= True else: vaccinated = False if (vaccinated): vaccine = np.argmax(vaccine_array) self.vaccines =", "(np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom probabilities for symptoms when", "= [0] * pop.n_vaccines # use vaccine = -1 if no vaccine is", "n_genes self.n_comorbidities = 6; self.n_symptoms = 10 self.n_vaccines = n_vaccines self.n_treatments = n_treatments", "to each patient Returns: The symptoms of the selected individuals Notes: Currently only", "the future multiple doses may be modelled. 
\"\"\" outcome = np.zeros([len(person_index), self.n_symptoms]) i", "1): result[i, k] = self.X[t, k] else: if (np.random.uniform() < r[k]): result[i, k]", "Asthma, Obesity, Smoking, Diabetes, Heart diseae, Hypertension ## Symptom list: Covid-Recovered, Covid-Positive, Taste,", "treatments = np.zeros([X.shape[0], self.n_treatments]) result = np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): #", "symptoms: Pneumonia, Myocarditis, Blood-Clots import numpy as np import pickle class Person: def", "1000 X_treatment = pop.generate(n_treated) X_treatment = X_treatment[X_treatment[:, 1] == 1] print(\"Generating treatment outcomes\")", "[pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline = np.array(", "self.htension = 0.3 self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8]", "generate(self, n_individuals): \"\"\"Generate a population. Call this function before anything else is done.", "individuals Notes: Currently only one vaccine dose is implemented, but in the future", "import policy except: import project2.src.covid.policy n_symptoms = 10 n_genes = 128 n_vaccines =", "self.G = np.random.uniform(size=[self.n_genes, self.n_symptoms]) self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma =", "for s in range(2, pop.n_symptoms): if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class", "Call this function before anything else is done. 
Calling this function again generates", "index the treated # use t to index the original population # print(treatment)", "(\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for k in", "0.001, 0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] =", "[0] * pop.n_symptoms if (np.random.uniform() <= self.symptom_baseline[0]): self.symptoms[0] = 1 # increase symptom", "self.baseline_efficacy = [0.5, 0.6, 0.7] self.mild_efficacy = [0.6, 0.7, 0.8] self.critical_efficacy = [0.8,", "= [0.7, 0.1, 0.1, 0.1] self.prevalence = 0.1 self.historical_prevalence = 0.1 ## Generates", "anything else is done. Calling this function again generates a completely new population", "X[t, k] ##print(\"X:\", X[t,:self.n_symptoms] , \"Y:\", result[t]) return treatments, result # main if", "0.001, 0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence;", "np.array( [pop.historical_prevalence, pop.prevalence, 0.01, 0.05, 0.05, 0.01, 0.02, 0.001, 0.001, 0.001]); self.symptom_baseline =", "result = np.zeros([X.shape[0], self.n_symptoms]) for t in range(X.shape[0]): # print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])]", "the selected individuals \"\"\" N = len(person_index) result = np.zeros([N, self.n_symptoms]) # use", "self.n_comorbidities + self.n_vaccines + self.n_symptoms]) Y = np.zeros([n_individuals, self.n_treatments, self.n_symptoms]) self.persons = []", "= pop.diabetes + self.comorbidities[1] * 0.5 self.HT = pop.htension + self.comorbidities[2] * 0.5", "Currently only one vaccine dose is implemented, but in the future multiple doses", "range(N): t = person_index[i] r = np.array(np.matrix(treatment[i]) * self.A).flatten() for k in range(self.n_symptoms):", "person.genes, person.comorbidities, person.vaccines]) X[t, 
:] = x_t self.X = X return X def", "\"\"\" Give a vaccine to a specific person. Args: person_index (int array), indices", "to a specific person. Args: person_index (int array), indices of person in the", "range(self.n_symptoms): if (k <= 1): result[i, k] = self.X[t, k] else: if (np.random.uniform()", "self.G /= sum(self.G) self.A = np.random.uniform(size=[self.n_treatments, self.n_symptoms]) self.asthma = 0.08 self.obesity = 0.3", "r[k]): result[t, k] = 0 else: result[t, k] = X[t, k] ##print(\"X:\", X[t,:self.n_symptoms]", "self.symptom_baseline *= 0.5 # baseline symptoms of non-covid patients if (self.symptoms[0] == 0", "no symptoms apart from Covid+/CovidPre self.symptoms = [0] * pop.n_symptoms if (np.random.uniform() <=", "Mild symptoms: Taste, Fever, Headache, Stomach ## Critical symptoms: Pneumonia, Myocarditis, Blood-Clots import", "0.001]); self.symptom_baseline = np.array( np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline self.symptom_baseline[0] = pop.historical_prevalence; self.symptom_baseline[1]", "list: Covid-Recovered, Covid-Positive, Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ##", "self.age = np.random.gamma(3, 11) self.age_adj = self.age / 100 # age affects everything", "return x_t ## Treats a population def treatment(self, X, policy): treatments = np.zeros([X.shape[0],", "(np.random.uniform() < r[k]): result[i, k] = 0 else: result[i, k] = self.X[t, k]", "of the selected individuals \"\"\" N = len(person_index) result = np.zeros([N, self.n_symptoms]) #", "treatment): \"\"\" Treat a patient. 
Args: person_index (int array), indices of persons in", "Taste, Fever, Headache, # Pneumonia, Stomach, Myocarditis, Blood-Clots, Death ## Mild symptoms: Taste,", "# print (\"X:\", result[t]) treatments[t][policy.get_action(X[t])] = 1 r = np.array(np.matrix(treatments[t]) * self.A).flatten() for", "0.1, 0.1, 0.1] self.prevalence = 0.1 self.historical_prevalence = 0.1 ## Generates data with", "pop.n_comorbidities self.comorbidities[0] = pop.asthma self.comorbidities[1] = pop.obesity * self.age_adj self.comorbidities[2] = pop.smoking self.diab", "1): self.symptom_baseline[[2, 3, 4, 6]] *= pop.mild_efficacy[vaccine] self.symptom_baseline[[5, 7, 8]] *= pop.critical_efficacy[vaccine] self.symptom_baseline[9]", "*= pop.death_efficacy[vaccine] if (self.symptoms[0] == 1): self.symptom_baseline *= 0.5 # baseline symptoms of", "The symptoms of the selected individuals \"\"\" N = len(person_index) result = np.zeros([N,", "for k in range(self.n_symptoms): if (k <= 1): result[i, k] = self.X[t, k]", "np.matrix(self.genes) * pop.G).flatten() * self.symptom_baseline # print(\"V:\", vaccine, symptom_baseline) for s in range(2,", "self.age_adj = self.age / 100 # age affects everything self.income = np.random.gamma(1, 10000)", "* self.A).flatten() for k in range(self.n_symptoms): if (k <= 1): result[i, k] =", "patient Returns: The symptoms of the selected individuals Notes: Currently only one vaccine", "if (np.random.uniform() < self.symptom_baseline[s]): self.symptoms[s] = 1 class Population: def __init__(self, n_genes, n_vaccines,", "Notes: Currently only one vaccine dose is implemented, but in the future multiple" ]
[ "#! /usr/bin/env python import sys sys.path.append('..') from vec2d import vec2d v = vec2d(-1,", "/usr/bin/env python import sys sys.path.append('..') from vec2d import vec2d v = vec2d(-1, 1)", "import sys sys.path.append('..') from vec2d import vec2d v = vec2d(-1, 1) print (v.angle)", "python import sys sys.path.append('..') from vec2d import vec2d v = vec2d(-1, 1) print", "<reponame>xanewton/PygameCreepsGame #! /usr/bin/env python import sys sys.path.append('..') from vec2d import vec2d v =" ]
[ "*args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command {command}", "**kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command {command} failed,", "getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count: int = 5, retry_interval:", "retry_count self._retry_interval = retry_interval async def execute(self, command: str, *args: Any, **kwargs: Any)", "import Any, Dict from asyncio import sleep from logging import getLogger import aioredis", "range(self._retry_count): try: return await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, )", "exc: Exception for i in range(self._retry_count): try: return await super().execute(command, *args, **kwargs) except", "= getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count: int = 5,", "( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command {command} failed, remaining attempts:", "super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command", "logging import getLogger import aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self,", "failed, remaining attempts: {self._retry_count - i}\" ) exc = e await sleep(self._retry_interval) log.error(f\"Command", "retry_interval: int = 2, **kwargs: Any ) -> None: super().__init__(*args, **kwargs) self._retry_count =", "def __init__( self, *args: Any, retry_count: int = 5, retry_interval: int = 2,", "ConnectionRefusedError, ) as e: log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count - i}\"", 
"sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count} retries\") raise exc async def create_redis_pool(config:", "failed after {self._retry_count} retries\") raise exc async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool:", "= retry_count self._retry_interval = retry_interval async def execute(self, command: str, *args: Any, **kwargs:", ") as e: log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count - i}\" )", "= 5, retry_interval: int = 2, **kwargs: Any ) -> None: super().__init__(*args, **kwargs)", "from asyncio import sleep from logging import getLogger import aioredis log = getLogger(__name__)", "Any ) -> None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval = retry_interval async", "log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count - i}\" ) exc = e", "i in range(self._retry_count): try: return await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError,", "Any) -> Any: exc: Exception for i in range(self._retry_count): try: return await super().execute(command,", "aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count: int", "str, *args: Any, **kwargs: Any) -> Any: exc: Exception for i in range(self._retry_count):", "def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config = config.copy() address = (config.pop(\"host\"), config.pop(\"port\"))", "= e await sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count} retries\") raise exc", "Any, **kwargs: Any) -> Any: exc: Exception for i in range(self._retry_count): try: return", "Any: exc: Exception for i in range(self._retry_count): try: return await super().execute(command, *args, **kwargs)", "async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config = 
config.copy() address = (config.pop(\"host\"),", "as e: log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count - i}\" ) exc", "command: str, *args: Any, **kwargs: Any) -> Any: exc: Exception for i in", "exc async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config = config.copy() address =", "getLogger import aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any,", "_ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count: int = 5, retry_interval: int =", "_ConnectionsPool: config = config.copy() address = (config.pop(\"host\"), config.pop(\"port\")) return await aioredis.create_pool(address, pool_cls=_ConnectionsPool, **config)", "*args: Any, **kwargs: Any) -> Any: exc: Exception for i in range(self._retry_count): try:", "__init__( self, *args: Any, retry_count: int = 5, retry_interval: int = 2, **kwargs:", "2, **kwargs: Any ) -> None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval =", "= 2, **kwargs: Any ) -> None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval", "None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval = retry_interval async def execute(self, command:", "has failed after {self._retry_count} retries\") raise exc async def create_redis_pool(config: Dict[str, Any]) ->", "**kwargs: Any) -> Any: exc: Exception for i in range(self._retry_count): try: return await", "raise exc async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config = config.copy() address", "-> _ConnectionsPool: config = config.copy() address = (config.pop(\"host\"), config.pop(\"port\")) return await aioredis.create_pool(address, pool_cls=_ConnectionsPool,", "{command} failed, remaining attempts: {self._retry_count - i}\" ) exc = e await sleep(self._retry_interval)", "sleep from logging import getLogger import 
aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def", "retry_interval async def execute(self, command: str, *args: Any, **kwargs: Any) -> Any: exc:", "self, *args: Any, retry_count: int = 5, retry_interval: int = 2, **kwargs: Any", "async def execute(self, command: str, *args: Any, **kwargs: Any) -> Any: exc: Exception", "Dict from asyncio import sleep from logging import getLogger import aioredis log =", "Any, Dict from asyncio import sleep from logging import getLogger import aioredis log", "from logging import getLogger import aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__(", "exc = e await sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count} retries\") raise", "log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count: int =", "asyncio import sleep from logging import getLogger import aioredis log = getLogger(__name__) class", "aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count -", "-> None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval = retry_interval async def execute(self,", "**kwargs) self._retry_count = retry_count self._retry_interval = retry_interval async def execute(self, command: str, *args:", "from typing import Any, Dict from asyncio import sleep from logging import getLogger", "try: return await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as", "in range(self._retry_count): try: return await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError,", "- i}\" ) exc = e await sleep(self._retry_interval) log.error(f\"Command {command} has failed after", "retries\") raise exc 
async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config = config.copy()", "int = 5, retry_interval: int = 2, **kwargs: Any ) -> None: super().__init__(*args,", "except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command {command} failed, remaining", "typing import Any, Dict from asyncio import sleep from logging import getLogger import", "import sleep from logging import getLogger import aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool):", "i}\" ) exc = e await sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count}", "after {self._retry_count} retries\") raise exc async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config", "5, retry_interval: int = 2, **kwargs: Any ) -> None: super().__init__(*args, **kwargs) self._retry_count", "{self._retry_count} retries\") raise exc async def create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config =", "*args: Any, retry_count: int = 5, retry_interval: int = 2, **kwargs: Any )", "for i in range(self._retry_count): try: return await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError,", "f\"Command {command} failed, remaining attempts: {self._retry_count - i}\" ) exc = e await", "def execute(self, command: str, *args: Any, **kwargs: Any) -> Any: exc: Exception for", "self._retry_interval = retry_interval async def execute(self, command: str, *args: Any, **kwargs: Any) ->", "class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count: int = 5, retry_interval: int", "create_redis_pool(config: Dict[str, Any]) -> _ConnectionsPool: config = config.copy() address = (config.pop(\"host\"), config.pop(\"port\")) return", "Any, retry_count: int = 5, retry_interval: int = 2, **kwargs: Any ) ->", "import getLogger import aioredis log = getLogger(__name__) class 
_ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args:", "Any]) -> _ConnectionsPool: config = config.copy() address = (config.pop(\"host\"), config.pop(\"port\")) return await aioredis.create_pool(address,", ") exc = e await sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count} retries\")", "attempts: {self._retry_count - i}\" ) exc = e await sleep(self._retry_interval) log.error(f\"Command {command} has", "-> Any: exc: Exception for i in range(self._retry_count): try: return await super().execute(command, *args,", "import aioredis log = getLogger(__name__) class _ConnectionsPool(aioredis.ConnectionsPool): def __init__( self, *args: Any, retry_count:", "return await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e:", "super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval = retry_interval async def execute(self, command: str,", "Exception for i in range(self._retry_count): try: return await super().execute(command, *args, **kwargs) except (", "log.error(f\"Command {command} has failed after {self._retry_count} retries\") raise exc async def create_redis_pool(config: Dict[str,", ") -> None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval = retry_interval async def", "int = 2, **kwargs: Any ) -> None: super().__init__(*args, **kwargs) self._retry_count = retry_count", "{command} has failed after {self._retry_count} retries\") raise exc async def create_redis_pool(config: Dict[str, Any])", "retry_count: int = 5, retry_interval: int = 2, **kwargs: Any ) -> None:", "**kwargs: Any ) -> None: super().__init__(*args, **kwargs) self._retry_count = retry_count self._retry_interval = retry_interval", "e: log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count - i}\" ) exc =", "self._retry_count = retry_count self._retry_interval = 
retry_interval async def execute(self, command: str, *args: Any,", "e await sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count} retries\") raise exc async", "execute(self, command: str, *args: Any, **kwargs: Any) -> Any: exc: Exception for i", "await super().execute(command, *args, **kwargs) except ( aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug(", "{self._retry_count - i}\" ) exc = e await sleep(self._retry_interval) log.error(f\"Command {command} has failed", "remaining attempts: {self._retry_count - i}\" ) exc = e await sleep(self._retry_interval) log.error(f\"Command {command}", "await sleep(self._retry_interval) log.error(f\"Command {command} has failed after {self._retry_count} retries\") raise exc async def", "Dict[str, Any]) -> _ConnectionsPool: config = config.copy() address = (config.pop(\"host\"), config.pop(\"port\")) return await", "aioredis.ConnectionClosedError, aioredis.PoolClosedError, ConnectionRefusedError, ) as e: log.debug( f\"Command {command} failed, remaining attempts: {self._retry_count", "= retry_interval async def execute(self, command: str, *args: Any, **kwargs: Any) -> Any:" ]
[ "1, 100)) Y = X # prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y))", "= ConvAutoencoder() model = model.double() # tackles a type error # define loss", "for an autoencoder. import torch import numpy as np import torch.optim as optim", "Y = X # prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader =", "torch import nn from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder #", "model.shallow_autoencoder import ConvAutoencoder # load model definition model = ConvAutoencoder() model = model.double()", "100)) Y = X # prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader", "as np import torch.optim as optim from torch import nn from torch.utils.data import", "# <EMAIL> # MIT License # As-simple-as-possible training loop for an autoencoder. import", "model.double() # tackles a type error # define loss and optimizer criterion =", "a type error # define loss and optimizer criterion = nn.MSELoss() optimizer =", "# <NAME> # <EMAIL> # MIT License # As-simple-as-possible training loop for an", "for epoch in range(200): for x, y in dataloader: optimizer.zero_grad() # forward and", "out = model(x) loss = criterion(out, y) loss.backward() optimizer.step() print(loss.item()) # loss should", "all cases, # since Y could differ from X (e.g. for denoising autoencoders).", "nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: # Using separate input", "Toy data: # Using separate input and output variables to cover all cases,", "cases, # since Y could differ from X (e.g. for denoising autoencoders). X", "differ from X (e.g. for denoising autoencoders). X = np.random.random((300, 1, 100)) Y", "momentum=0.9) # Toy data: # Using separate input and output variables to cover", "from X (e.g. for denoising autoencoders). 
X = np.random.random((300, 1, 100)) Y =", "# As-simple-as-possible training loop for an autoencoder. import torch import numpy as np", "X (e.g. for denoising autoencoders). X = np.random.random((300, 1, 100)) Y = X", "model(x) loss = criterion(out, y) loss.backward() optimizer.step() print(loss.item()) # loss should be decreasing", "and optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data:", "# Training loop for epoch in range(200): for x, y in dataloader: optimizer.zero_grad()", "data: # Using separate input and output variables to cover all cases, #", "y in dataloader: optimizer.zero_grad() # forward and backward pass out = model(x) loss", "numpy as np import torch.optim as optim from torch import nn from torch.utils.data", "MIT License # As-simple-as-possible training loop for an autoencoder. import torch import numpy", "range(200): for x, y in dataloader: optimizer.zero_grad() # forward and backward pass out", "error # define loss and optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001,", "= TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training loop for epoch", "pass out = model(x) loss = criterion(out, y) loss.backward() optimizer.step() print(loss.item()) # loss", "= nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: # Using separate", "= np.random.random((300, 1, 100)) Y = X # prepare pytorch dataloader dataset =", "output variables to cover all cases, # since Y could differ from X", "could differ from X (e.g. for denoising autoencoders). 
X = np.random.random((300, 1, 100))", "tackles a type error # define loss and optimizer criterion = nn.MSELoss() optimizer", "import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load model definition model =", "lr=0.001, momentum=0.9) # Toy data: # Using separate input and output variables to", "optimizer.zero_grad() # forward and backward pass out = model(x) loss = criterion(out, y)", "in dataloader: optimizer.zero_grad() # forward and backward pass out = model(x) loss =", "# Using separate input and output variables to cover all cases, # since", "prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) #", "# MIT License # As-simple-as-possible training loop for an autoencoder. import torch import", "in range(200): for x, y in dataloader: optimizer.zero_grad() # forward and backward pass", "type error # define loss and optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(),", "model = model.double() # tackles a type error # define loss and optimizer", "# Toy data: # Using separate input and output variables to cover all", "import ConvAutoencoder # load model definition model = ConvAutoencoder() model = model.double() #", "# define loss and optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)", "np import torch.optim as optim from torch import nn from torch.utils.data import DataLoader,", "backward pass out = model(x) loss = criterion(out, y) loss.backward() optimizer.step() print(loss.item()) #", "DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load model definition model = ConvAutoencoder()", "and output variables to cover all cases, # since Y could differ from", "= DataLoader(dataset, batch_size=256, shuffle=True) # Training loop for epoch in range(200): for x,", "ConvAutoencoder() model = model.double() # tackles a type error # 
define loss and", "variables to cover all cases, # since Y could differ from X (e.g.", "denoising autoencoders). X = np.random.random((300, 1, 100)) Y = X # prepare pytorch", "from torch import nn from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder", "criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: # Using", "optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: # Using separate input and output variables", "# since Y could differ from X (e.g. for denoising autoencoders). X =", "np.random.random((300, 1, 100)) Y = X # prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X),", "optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: # Using separate input and", "# prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True)", "torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training loop for epoch in range(200):", "autoencoder. import torch import numpy as np import torch.optim as optim from torch", "load model definition model = ConvAutoencoder() model = model.double() # tackles a type", "ConvAutoencoder # load model definition model = ConvAutoencoder() model = model.double() # tackles", "an autoencoder. import torch import numpy as np import torch.optim as optim from", "optim from torch import nn from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import", "# load model definition model = ConvAutoencoder() model = model.double() # tackles a", "dataloader: optimizer.zero_grad() # forward and backward pass out = model(x) loss = criterion(out,", "Y could differ from X (e.g. for denoising autoencoders). X = np.random.random((300, 1,", "training loop for an autoencoder. 
import torch import numpy as np import torch.optim", "= X # prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset,", "<NAME> # <EMAIL> # MIT License # As-simple-as-possible training loop for an autoencoder.", "shuffle=True) # Training loop for epoch in range(200): for x, y in dataloader:", "X = np.random.random((300, 1, 100)) Y = X # prepare pytorch dataloader dataset", "import numpy as np import torch.optim as optim from torch import nn from", "(e.g. for denoising autoencoders). X = np.random.random((300, 1, 100)) Y = X #", "torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load model definition model", "loop for epoch in range(200): for x, y in dataloader: optimizer.zero_grad() # forward", "define loss and optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) #", "and backward pass out = model(x) loss = criterion(out, y) loss.backward() optimizer.step() print(loss.item())", "DataLoader(dataset, batch_size=256, shuffle=True) # Training loop for epoch in range(200): for x, y", "<EMAIL> # MIT License # As-simple-as-possible training loop for an autoencoder. import torch", "torch.optim as optim from torch import nn from torch.utils.data import DataLoader, TensorDataset from", "for denoising autoencoders). X = np.random.random((300, 1, 100)) Y = X # prepare", "import torch.optim as optim from torch import nn from torch.utils.data import DataLoader, TensorDataset", "batch_size=256, shuffle=True) # Training loop for epoch in range(200): for x, y in", "Using separate input and output variables to cover all cases, # since Y", "from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load model definition", "autoencoders). X = np.random.random((300, 1, 100)) Y = X # prepare pytorch dataloader", "loop for an autoencoder. 
import torch import numpy as np import torch.optim as", "License # As-simple-as-possible training loop for an autoencoder. import torch import numpy as", "Training loop for epoch in range(200): for x, y in dataloader: optimizer.zero_grad() #", "TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training loop for epoch in", "import nn from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load", "x, y in dataloader: optimizer.zero_grad() # forward and backward pass out = model(x)", "as optim from torch import nn from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder", "model = ConvAutoencoder() model = model.double() # tackles a type error # define", "nn from torch.utils.data import DataLoader, TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load model", "forward and backward pass out = model(x) loss = criterion(out, y) loss.backward() optimizer.step()", "torch import numpy as np import torch.optim as optim from torch import nn", "model definition model = ConvAutoencoder() model = model.double() # tackles a type error", "= optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: # Using separate input and output", "dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training loop for epoch in range(200): for", "since Y could differ from X (e.g. for denoising autoencoders). X = np.random.random((300,", "# forward and backward pass out = model(x) loss = criterion(out, y) loss.backward()", "to cover all cases, # since Y could differ from X (e.g. 
for", "epoch in range(200): for x, y in dataloader: optimizer.zero_grad() # forward and backward", "import torch import numpy as np import torch.optim as optim from torch import", "optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy data: #", "= model.double() # tackles a type error # define loss and optimizer criterion", "As-simple-as-possible training loop for an autoencoder. import torch import numpy as np import", "loss and optimizer criterion = nn.MSELoss() optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9) # Toy", "pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training", "input and output variables to cover all cases, # since Y could differ", "cover all cases, # since Y could differ from X (e.g. for denoising", "from model.shallow_autoencoder import ConvAutoencoder # load model definition model = ConvAutoencoder() model =", "# tackles a type error # define loss and optimizer criterion = nn.MSELoss()", "for x, y in dataloader: optimizer.zero_grad() # forward and backward pass out =", "separate input and output variables to cover all cases, # since Y could", "TensorDataset from model.shallow_autoencoder import ConvAutoencoder # load model definition model = ConvAutoencoder() model", "dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training loop", "= model(x) loss = criterion(out, y) loss.backward() optimizer.step() print(loss.item()) # loss should be", "definition model = ConvAutoencoder() model = model.double() # tackles a type error #", "X # prepare pytorch dataloader dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256,", "dataset = TensorDataset(torch.tensor(X), torch.tensor(Y)) dataloader = DataLoader(dataset, batch_size=256, shuffle=True) # Training loop 
for" ]
[ "times = []double() for i in range(4): t0 = clock() res = copy_list(addr(a),", "for i in range(n): b = a[:] for j in range(10): b.push_back( j", "i in range(4): t0 = clock() res = copy_list(addr(a), 10000) tk = clock()", "def test(): a = range(1000) times = []double() for i in range(4): t0", "t0 = clock() res = copy_list(addr(a), 10000) tk = clock() times.append(tk - t0)", "= clock() times.append(tk - t0) avg = sumd(times) / len(times) print(avg) def main():", "return x def test(): a = range(1000) times = []double() for i in", "from time import clock from runtime import * with stack: def copy_list( a:[]int,", "clock from runtime import * with stack: def copy_list( a:[]int, n ) ->", "i in range(n): b = a[:] for j in range(10): b.push_back( j )", "in range(10): b.push_back( j ) x.push_back( b ) return x def test(): a", "a[:] for j in range(10): b.push_back( j ) x.push_back( b ) return x", "= copy_list(addr(a), 10000) tk = clock() times.append(tk - t0) avg = sumd(times) /", "def copy_list( a:[]int, n ) -> [][]int: x = [][]int() for i in", "b = a[:] for j in range(10): b.push_back( j ) x.push_back( b )", "j in range(10): b.push_back( j ) x.push_back( b ) return x def test():", "x def test(): a = range(1000) times = []double() for i in range(4):", "benchmark''' from time import clock from runtime import * with stack: def copy_list(", "with stack: def copy_list( a:[]int, n ) -> [][]int: x = [][]int() for", "times.append(tk - t0) avg = sumd(times) / len(times) print(avg) def main(): test() main()", "[][]int: x = [][]int() for i in range(n): b = a[:] for j", "for i in range(4): t0 = clock() res = copy_list(addr(a), 10000) tk =", "list micro benchmark''' from time import clock from runtime import * with stack:", "clock() res = copy_list(addr(a), 10000) tk = clock() times.append(tk - t0) avg =", "for j in range(10): b.push_back( j ) x.push_back( b ) return x def", "range(1000) times = []double() for i in range(4): t0 = clock() res =", "10000) tk = clock() times.append(tk - 
t0) avg = sumd(times) / len(times) print(avg)", "res = copy_list(addr(a), 10000) tk = clock() times.append(tk - t0) avg = sumd(times)", "time import clock from runtime import * with stack: def copy_list( a:[]int, n", "= a[:] for j in range(10): b.push_back( j ) x.push_back( b ) return", "= []double() for i in range(4): t0 = clock() res = copy_list(addr(a), 10000)", "in range(n): b = a[:] for j in range(10): b.push_back( j ) x.push_back(", "n ) -> [][]int: x = [][]int() for i in range(n): b =", "[][]int() for i in range(n): b = a[:] for j in range(10): b.push_back(", "'''copy list micro benchmark''' from time import clock from runtime import * with", "stack: def copy_list( a:[]int, n ) -> [][]int: x = [][]int() for i", "x = [][]int() for i in range(n): b = a[:] for j in", "test(): a = range(1000) times = []double() for i in range(4): t0 =", "import clock from runtime import * with stack: def copy_list( a:[]int, n )", "a:[]int, n ) -> [][]int: x = [][]int() for i in range(n): b", ") return x def test(): a = range(1000) times = []double() for i", ") x.push_back( b ) return x def test(): a = range(1000) times =", "[]double() for i in range(4): t0 = clock() res = copy_list(addr(a), 10000) tk", "b.push_back( j ) x.push_back( b ) return x def test(): a = range(1000)", "in range(4): t0 = clock() res = copy_list(addr(a), 10000) tk = clock() times.append(tk", "<reponame>secureosv/pythia<filename>regtests/bench/copy_list-typed-stack.py '''copy list micro benchmark''' from time import clock from runtime import *", "range(n): b = a[:] for j in range(10): b.push_back( j ) x.push_back( b", "b ) return x def test(): a = range(1000) times = []double() for", "clock() times.append(tk - t0) avg = sumd(times) / len(times) print(avg) def main(): test()", "j ) x.push_back( b ) return x def test(): a = range(1000) times", "runtime import * with stack: def copy_list( a:[]int, n ) -> [][]int: x", "a = range(1000) times = []double() for i in range(4): t0 = clock()", "from runtime import * 
with stack: def copy_list( a:[]int, n ) -> [][]int:", "tk = clock() times.append(tk - t0) avg = sumd(times) / len(times) print(avg) def", "= range(1000) times = []double() for i in range(4): t0 = clock() res", "micro benchmark''' from time import clock from runtime import * with stack: def", "-> [][]int: x = [][]int() for i in range(n): b = a[:] for", ") -> [][]int: x = [][]int() for i in range(n): b = a[:]", "= [][]int() for i in range(n): b = a[:] for j in range(10):", "= clock() res = copy_list(addr(a), 10000) tk = clock() times.append(tk - t0) avg", "copy_list(addr(a), 10000) tk = clock() times.append(tk - t0) avg = sumd(times) / len(times)", "x.push_back( b ) return x def test(): a = range(1000) times = []double()", "range(4): t0 = clock() res = copy_list(addr(a), 10000) tk = clock() times.append(tk -", "range(10): b.push_back( j ) x.push_back( b ) return x def test(): a =", "* with stack: def copy_list( a:[]int, n ) -> [][]int: x = [][]int()", "import * with stack: def copy_list( a:[]int, n ) -> [][]int: x =", "copy_list( a:[]int, n ) -> [][]int: x = [][]int() for i in range(n):" ]
[ "guild_id is the id of the guild that has the message to expand.", "function returns True, allow the message to be expanded to another guild. pass", "simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with", "the message to expand. # If this function returns True, allow the message", "commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\")", "pass # Customize the behavior of the deployment. # All Cogs are named", "id of the guild that has the message to expand. # If this", "the behavior of the deployment. # All Cogs are named ExpanderCog. bot.get_cog(\"ExpanderCog\").expander =", "async def check_global_expand(self, guild_id): # guild_id is the id of the guild that", "bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with webhook.", "message to expand. # If this function returns True, allow the message to", "guild that has the message to expand. # If this function returns True,", "behavior of the deployment. # All Cogs are named ExpanderCog. bot.get_cog(\"ExpanderCog\").expander = Expander", "that has the message to expand. # If this function returns True, allow", "all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id):", "returns True, allow the message to be expanded to another guild. pass #", "self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async", "to another guild. pass # Customize the behavior of the deployment. 
# All", "def check_global_expand(self, guild_id): # guild_id is the id of the guild that has", "commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all", "# Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and", "bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") #", "of the guild that has the message to expand. # If this function", "is the id of the guild that has the message to expand. #", "and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id): #", "from discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand", "with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id is", "# Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with webhook. #", "<reponame>furimu1234/discord-ext-utils from discord.ext.utils import MinimalExpander from discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) #", "# guild_id is the id of the guild that has the message to", "send with webhook. 
# self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id", "Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id is the id of the guild", "True, allow the message to be expanded to another guild. pass # Customize", "Customize the behavior of the deployment. # All Cogs are named ExpanderCog. bot.get_cog(\"ExpanderCog\").expander", "message to be expanded to another guild. pass # Customize the behavior of", "this function returns True, allow the message to be expanded to another guild.", "# self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id is the id", "all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class", "the guild that has the message to expand. # If this function returns", "has the message to expand. # If this function returns True, allow the", "of the deployment. # All Cogs are named ExpanderCog. bot.get_cog(\"ExpanderCog\").expander = Expander bot.run(\"TOKEN\")", "the message to be expanded to another guild. pass # Customize the behavior", "If this function returns True, allow the message to be expanded to another", "Expand all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self,", "import MinimalExpander from discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\")", "# If this function returns True, allow the message to be expanded to", "expand. # If this function returns True, allow the message to be expanded", "be expanded to another guild. 
pass # Customize the behavior of the deployment.", "expanded to another guild. pass # Customize the behavior of the deployment. #", "self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id is the id of", "allow the message to be expanded to another guild. pass # Customize the", "# Expand all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def", "discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all.", "to be expanded to another guild. pass # Customize the behavior of the", "discord.ext.utils import MinimalExpander from discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple.", "to expand. # If this function returns True, allow the message to be", "guild. pass # Customize the behavior of the deployment. # All Cogs are", "check_global_expand(self, guild_id): # guild_id is the id of the guild that has the", "# Customize the behavior of the deployment. # All Cogs are named ExpanderCog.", "import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. #", "# self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander):", "Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send with webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\")", "class Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id is the id of the", "another guild. pass # Customize the behavior of the deployment. 
# All Cogs", "MinimalExpander from discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") #", "= commands.Bot(command_prefix=commands.when_mentioned) # Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand", "Expand simple. bot.load_extension(\"discord.ext.utils.cogs.minimal_expander\") # Expand all. # self.load_extension(\"discord.ext.utils.cogs.expander\") # Expand all and send", "guild_id): # guild_id is the id of the guild that has the message", "the id of the guild that has the message to expand. # If", "webhook. # self.load_extension(\"discord.ext.utils.cogs.webhook_expander\") class Expander(MinimalExpander): async def check_global_expand(self, guild_id): # guild_id is the", "from discord.ext.utils import MinimalExpander from discord.ext import commands bot = commands.Bot(command_prefix=commands.when_mentioned) # Expand" ]
[]
[ "subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) #", "args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false') else: imageMirror = 'true' print('Mirror mode:", "seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape mlx_interp_val = 10 # interpolate #", "frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] # recent times for frame", "= ax.text(-75, 150, 'test1', color='black') fig.show() # show the figure before blitting def", "of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1)", "10 # interpolate # on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new", "colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw figure to copy background", "# new shape fig = plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111) #", "parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image for selfie (default: false)') args", "interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update colorbar", "mlx90641 shape mlx_interp_val = 10 # interpolate # on each dimension mlx_interp_shape =", "action='store_const', default='false', const='imageMirror', help='Flip the image for selfie (default: false)') args = parser.parse_args()", "reshape, flip data if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) #", "default='false', const='imageMirror', help='Flip the image for selfie (default: false)') args = 
parser.parse_args() imageMirror", "'test1', color='black') fig.show() # show the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) #", "= (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9)) # start figure ax", "mlx.getFrame(frame) # read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror ==", "= ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1)", "argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image for selfie", "dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image for selfie (default: false)') args =", "t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] # recent times for frame rate approx", "'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate", "while True: t1 = time.monotonic() # for determining frame rate try: plot_update() #", "mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape mlx_interp_val = 10 #", "Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image for selfie (default:", "true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape", "each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9)) #", "false') else: imageMirror = 'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame =", "ax_background 
= fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue = ax.text(-75,", "t_array = [] while True: t1 = time.monotonic() # for determining frame rate", "t_array = t_array[1:] # recent times for frame rate approx print('Frame Rate: {0:2.1f}fps'.format(len(t_array)/np.sum(t_array)))", "= seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16)", "# add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none',", "# mlx90641 shape mlx_interp_val = 10 # interpolate # on each dimension mlx_interp_shape", "fig.canvas.draw() # draw figure to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background", "mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9)) # start figure", "# show the new image fig.show() return t_array = [] while True: t1", "therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw", "new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show the", "therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1) # setup colorbar", "* 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape mlx_interp_val =", "= fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw", "shape mlx_interp_val = 10 # interpolate # on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val,", "ax.text(-75, 125, 'Max:', color='red') textMaxValue = 
ax.text(-75, 150, 'test1', color='black') fig.show() # show", "#! /usr/bin/python3 import seeed_mlx9064x import time,board,busio import numpy as np import adafruit_mlx90640 import", "= argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image for", "flip data if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate", "(mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9)) # start figure ax =", "colorbar label fig.canvas.draw() # draw figure to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) #", "approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] # recent times for", "colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) #", "= 'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] * 192", "fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue = ax.text(-75, 150, 'test1',", "# restore background mlx.getFrame(frame) # read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip", "adafruit_mlx90640 import matplotlib.pyplot as plt from scipy import ndimage import argparse parser =", "fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive", "150, 'test1', color='black') fig.show() # show the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background)", "# reshape, flip data if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array = 
ndimage.zoom(data_array,mlx_interp_val)", "== 'true'): data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set", "125, 'Max:', color='red') textMaxValue = ax.text(-75, 150, 'test1', color='black') fig.show() # show the", "return t_array = [] while True: t1 = time.monotonic() # for determining frame", "if(imageMirror == 'false'): print('Mirror mode: false') else: imageMirror = 'true' print('Mirror mode: true')", "before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read mlx90640 data_array", "fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape,", "t1 = time.monotonic() # for determining frame rate try: plot_update() # update plot", "len(t_array)>10: t_array = t_array[1:] # recent times for frame rate approx print('Frame Rate:", "'false'): print('Mirror mode: false') else: imageMirror = 'true' print('Mirror mode: true') mlx =", "on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9))", "for determining frame rate try: plot_update() # update plot except: continue # approximating", "= [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape", "frame rate try: plot_update() # update plot except: continue # approximating frame rate", "setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw figure to copy", "blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read mlx90640 data_array =", "update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox)", "True: t1 = 
time.monotonic() # for determining frame rate try: plot_update() # update", "[] while True: t1 = time.monotonic() # for determining frame rate try: plot_update()", "= (12,16) # mlx90641 shape mlx_interp_val = 10 # interpolate # on each", "textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show the new image fig.show()", "fig.show() return t_array = [] while True: t1 = time.monotonic() # for determining", "# for determining frame rate try: plot_update() # update plot except: continue #", "= ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature", "= 10 # interpolate # on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) #", "shape fig = plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111) # add subplot", "selfie (default: false)') args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror", "data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array))", "try: plot_update() # update plot except: continue # approximating frame rate t_array.append(time.monotonic()-t1) if", "image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show the new image", "= seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape mlx_interp_val = 10 # interpolate", "mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror == 'true'): data_array =", "interpolate # on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig", "time,board,busio import numpy 
as np import adafruit_mlx90640 import matplotlib.pyplot as plt from scipy", "update plot except: continue # approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array =", "print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate =", "plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background", "else: imageMirror = 'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0]", "ax.text(-75, 150, 'test1', color='black') fig.show() # show the figure before blitting def plot_update():", "copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue", "bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image", "rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] # recent times for frame rate", "<reponame>askehill/covis2 #! 
/usr/bin/python3 import seeed_mlx9064x import time,board,busio import numpy as np import adafruit_mlx90640", "mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ", "= fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding therm1", "# set bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new", "= fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue = ax.text(-75, 150,", "import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip", "padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1) # setup", "add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45)", "mode: false') else: imageMirror = 'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame", "mlx_interp_val = 10 # interpolate # on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val)", "fig = plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95)", "# approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] # recent times", "as np import adafruit_mlx90640 import matplotlib.pyplot as plt from scipy import ndimage import", "= plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) #", "data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data 
if(imageMirror == 'true'): data_array = np.flipud(data_array)", "imageMirror = 'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] *", "ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14)", "if len(t_array)>10: t_array = t_array[1:] # recent times for frame rate approx print('Frame", "set bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal", "# colorbar label fig.canvas.draw() # draw figure to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox)", "const='imageMirror', help='Flip the image for selfie (default: false)') args = parser.parse_args() imageMirror =", "# update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1)))", "the new image fig.show() return t_array = [] while True: t1 = time.monotonic()", "# interpolate # on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape", "fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding therm1 =", "draw background fig.canvas.flush_events() # show the new image fig.show() return t_array = []", "figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read mlx90640", "ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary padding", "background fig.canvas.flush_events() # show the new image fig.show() return t_array = [] while", "plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get", "[$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw figure to 
copy background ax_background =", "seeed_mlx9064x import time,board,busio import numpy as np import adafruit_mlx90640 import matplotlib.pyplot as plt", "1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show the new image fig.show() return", "Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image for selfie (default: false)')", "# show the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame)", "start figure ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of", "(12,16) # mlx90641 shape mlx_interp_val = 10 # interpolate # on each dimension", "[0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape mlx_interp_val", "mlx = seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape =", "parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false') else: imageMirror =", "parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the image", "import seeed_mlx9064x import time,board,busio import numpy as np import adafruit_mlx90640 import matplotlib.pyplot as", "# start figure ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid", "figure ax = fig.add_subplot(111) # add subplot fig.subplots_adjust(0.05,0.05,0.95,0.95) # get rid of unnecessary", "plot except: continue # approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:]", "range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # 
draw", "= [] while True: t1 = time.monotonic() # for determining frame rate try:", "draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show", "color='black') fig.show() # show the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore", "cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) #", "data if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array)", "image for selfie (default: false)') args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror ==", "fig.show() # show the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background", "# read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror == 'true'):", "'true'): data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data", "np import adafruit_mlx90640 import matplotlib.pyplot as plt from scipy import ndimage import argparse", "import matplotlib.pyplot as plt from scipy import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal", "scipy import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const',", "import adafruit_mlx90640 import matplotlib.pyplot as plt from scipy import ndimage import argparse parser", "cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array),", "background mlx.getFrame(frame) # read mlx90640 data_array = 
np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror", "show the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) #", "color='red') textMaxValue = ax.text(-75, 150, 'test1', color='black') fig.show() # show the figure before", "therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update colorbar range", "= time.monotonic() # for determining frame rate try: plot_update() # update plot except:", "# interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update", "matplotlib.pyplot as plt from scipy import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera", "textMaxValue = ax.text(-75, 150, 'test1', color='black') fig.show() # show the figure before blitting", "numpy as np import adafruit_mlx90640 import matplotlib.pyplot as plt from scipy import ndimage", "label fig.canvas.draw() # draw figure to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy", "ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) #", "cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw figure to copy background ax_background", "'Max:', color='red') textMaxValue = ax.text(-75, 150, 'test1', color='black') fig.show() # show the figure", "# preemptive image cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar", "= args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false') else: imageMirror = 'true' print('Mirror", "read mlx90640 data_array = 
np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror == 'true'): data_array", "/usr/bin/python3 import seeed_mlx9064x import time,board,busio import numpy as np import adafruit_mlx90640 import matplotlib.pyplot", "fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show the new image fig.show() return t_array", "np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array =", "new shape fig = plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111) # add", "# get rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image", "== 'false'): print('Mirror mode: false') else: imageMirror = 'true' print('Mirror mode: true') mlx", "plot_update() # update plot except: continue # approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10:", "fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw figure", "= np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) #", "time.monotonic() # for determining frame rate try: plot_update() # update plot except: continue", "import time,board,busio import numpy as np import adafruit_mlx90640 import matplotlib.pyplot as plt from", "mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9)) # start figure ax = fig.add_subplot(111)", "rate try: plot_update() # update plot except: continue # approximating frame rate t_array.append(time.monotonic()-t1)", "unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar = fig.colorbar(therm1) #", "# update plot except: continue # approximating frame rate 
t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array", "# draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() #", "# on each dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig =", "rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar =", "# set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001)", "plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) #", "copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue = ax.text(-75, 150, 'test1', color='black') fig.show()", "= np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array", "determining frame rate try: plot_update() # update plot except: continue # approximating frame", "ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror',", "# draw background fig.canvas.flush_events() # show the new image fig.show() return t_array =", "background ax.text(-75, 125, 'Max:', color='red') textMaxValue = ax.text(-75, 150, 'test1', color='black') fig.show() #", "get rid of unnecessary padding therm1 = ax.imshow(np.zeros(mlx_interp_shape),interpolation='none', cmap=plt.cm.bwr,vmin=25,vmax=45) # preemptive image cbar", "restore background mlx.getFrame(frame) # read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape)) # reshape, flip data", "argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') 
parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false', const='imageMirror', help='Flip the", "help='Flip the image for selfie (default: false)') args = parser.parse_args() imageMirror = args.imageMirror", "import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror', action='store_const', default='false',", "data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds", "print('Mirror mode: false') else: imageMirror = 'true' print('Mirror mode: true') mlx = seeed_mlx9064x.grove_mxl90641()", "args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false') else:", "the image for selfie (default: false)') args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror", "set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1)", "except: continue # approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] #", "data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set bounds cbar.on_mappable_changed(therm1) # update colorbar range plt.pause(0.001) ax.draw_artist(therm1) #", "frame = [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641", "np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) # set data therm1.set_clim(vmin=np.min(data_array),vmax=np.max(data_array)) # set", "imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false') else: imageMirror = 'true'", "mlx_shape = (12,16) # mlx90641 shape mlx_interp_val = 10 # interpolate # on", 
"fig.canvas.flush_events() # show the new image fig.show() return t_array = [] while True:", "the figure before blitting def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read", "to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:', color='red')", "as plt from scipy import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program')", "plt from scipy import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror',", "(default: false)') args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror mode:", "image cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw()", "ax.draw_artist(therm1) # draw new thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events()", "continue # approximating frame rate t_array.append(time.monotonic()-t1) if len(t_array)>10: t_array = t_array[1:] # recent", "false)') args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false')", "show the new image fig.show() return t_array = [] while True: t1 =", "= parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'): print('Mirror mode: false') else: imageMirror", "preemptive image cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label", "dimension mlx_interp_shape = (mlx_shape[0]*mlx_interp_val, mlx_shape[1]*mlx_interp_val) # new shape fig = plt.figure(figsize=(12,9)) # start", "cbar = fig.colorbar(therm1) # setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() #", "# draw figure to copy 
background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75,", "image fig.show() return t_array = [] while True: t1 = time.monotonic() # for", "def plot_update(): fig.canvas.restore_region(ax_background) # restore background mlx.getFrame(frame) # read mlx90640 data_array = np.fliplr(np.reshape(frame,mlx_shape))", "background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue =", "new image fig.show() return t_array = [] while True: t1 = time.monotonic() #", "draw figure to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125,", "figure to copy background ax_background = fig.canvas.copy_from_bbox(ax.bbox) # copy background ax.text(-75, 125, 'Max:',", "for selfie (default: false)') args = parser.parse_args() imageMirror = args.imageMirror if(imageMirror == 'false'):", "# setup colorbar cbar.set_label('Temperature [$^{\\circ}$C]',fontsize=14) # colorbar label fig.canvas.draw() # draw figure to", "from scipy import ndimage import argparse parser = argparse.ArgumentParser(description='Thermal Camera Program') parser.add_argument('--mirror', dest='imageMirror',", "# copy background ax.text(-75, 125, 'Max:', color='red') textMaxValue = ax.text(-75, 150, 'test1', color='black')", "thermal image textMaxValue.set_text(str(np.round(np.max(data_array), 1))) fig.canvas.blit(ax.bbox) # draw background fig.canvas.flush_events() # show the new", "if(imageMirror == 'true'): data_array = np.flipud(data_array) data_array = ndimage.zoom(data_array,mlx_interp_val) # interpolate therm1.set_array(data_array) #", "192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ mlx_shape = (12,16) # mlx90641 shape mlx_interp_val = 10", "import numpy as np import adafruit_mlx90640 import matplotlib.pyplot as plt from scipy import", "seeed_mlx9064x.grove_mxl90641() frame = [0] * 192 mlx.refresh_rate = seeed_mlx9064x.RefreshRate.REFRESH_8_HZ 
mlx_shape = (12,16) #" ]
[ "..bases import Event from .bases import ApplicationObject from .parameters import ParameterGroup, ParameterObject class", "_application_perform_callback(self, clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data,", "with self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped())", "PRIVATE METHODS ### async def _application_perform_callback(self, clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment )", ") @classmethod async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self):", "def start(self): async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _", "### PUBLIC PROPERTIES ### @property def clock(self): return self._clock @property def is_running(self): return", "@dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs to know", "__init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock()", "= 0 SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON =", "return self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs)", "denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback)", "self.application.perform( [midi_message], 
moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await", "_serialize(self): return { \"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, }", "Dict, Optional, Set, Tuple from supriya.clocks import AsyncTempoClock, Moment from ..bases import Event", "6 CLIP_PERFORM = 7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup()", "asyncio import dataclasses import enum from typing import Dict, Optional, Set, Tuple from", "self.is_running: await self.start() async def reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs)", "5 CLIP_EDIT = 6 CLIP_PERFORM = 7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self)", "async def perform(self, midi_messages): if ( self.application is None or self.application.status != self.application.Status.REALTIME", "self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS ### async def _application_perform_callback(self, clock_context,", "self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def clock(self): return self._clock @property def is_running(self):", "ApplicationObject from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ### class", "_ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async with", "from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum):", "async def cue(self, *args, **kwargs) -> int: return self._clock.cue(*args, **kwargs) async def cancel(self,", "**kwargs) -> Optional[Tuple]: 
return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if ( self.application", "} def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC", "self.application is None or self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__", "not self.is_running: await self.start() async def reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args,", "\"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running:", "self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for", "/ 4 ### PUBLIC METHODS ### async def cue(self, *args, **kwargs) -> int:", "data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__, \"spec\":", "async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator])", "list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4", "/ clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ### async def cue(self, *args, **kwargs)", "async def schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self,", 
"self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self):", "midi_messages): if ( self.application is None or self.application.status != self.application.Status.REALTIME ): return self._debug_tree(", "_tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ###", "supriya.clocks import AsyncTempoClock, Moment from ..bases import Event from .bases import ApplicationObject from", "None or self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _", "= 5 CLIP_EDIT = 6 CLIP_PERFORM = 7 ### INITIALIZER ### def __init__(self):", "asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop()", "CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM = 7 ### INITIALIZER ### def", "type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment))", "if not self.is_running: await self.start() async def reschedule(self, *args, **kwargs) -> Optional[int]: return", "!= self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages]) )", "**kwargs) async def schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs) async def", "self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject]", "for _ in self._dependencies]) await 
self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async", ") await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start() async def reschedule(self, *args,", "= 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM =", "return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs)", "**kwargs) -> int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async", "return self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class", "@property def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass", "from typing import Dict, Optional, Set, Tuple from supriya.clocks import AsyncTempoClock, Moment from", "denominator]) async def start(self): async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start()", "Moment from ..bases import Event from .bases import ApplicationObject from .parameters import ParameterGroup,", "set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS ### async def _application_perform_callback(self,", "self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start() async def reschedule(self, *args, **kwargs) ->", "await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies]) await self.application.flush()", "ParameterGroup() self._parameters: Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock() 
self._dependencies: Set[ApplicationObject] = set()", "import Event from .bases import ApplicationObject from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject):", "MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT", "### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject] =", "### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF", "= set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS ### async def", "midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data, transport_object): await", "def cancel(self, *args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages):", "*args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if (", "{} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None", "Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args,", "transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute,", "*args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute)", "4 CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM = 7 ### INITIALIZER ###", "await self._clock.start() 
self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop()", "TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs to know start delta", "-> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs) -> int: return", "stop(self): await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies]) await", "await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def clock(self): return self._clock @property", "= None ### PRIVATE METHODS ### async def _application_perform_callback(self, clock_context, midi_message): await self.application.perform(", "ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE", "= 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH =", "def perform(self, midi_messages): if ( self.application is None or self.application.status != self.application.Status.REALTIME ):", "### @property def clock(self): return self._clock @property def is_running(self): return self._clock.is_running @property def", "schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float):", "INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject] = {}", "await self.start() async def reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async", "PROPERTIES ### @property def clock(self): return self._clock @property def is_running(self): return self._clock.is_running @property", "): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ 
for _ in midi_messages]) ) await self.schedule(self._application_perform_callback,", "self._clock @property def is_running(self): return self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass class", "_ in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property", "dataclasses import enum from typing import Dict, Optional, Set, Tuple from supriya.clocks import", "Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1", "parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class", "**kwargs) async def cancel(self, *args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def", "self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await", "self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def clock(self): return self._clock", "[self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS ### async def _application_perform_callback(self, clock_context, midi_message):", "or self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _ in", "CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM =", "self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / 
clock_context.desired_moment.time_signature[1]", "midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start() async def reschedule(self,", "self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies]) await self.application.flush() await", "transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__, \"spec\": {", "{ \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1", "async def start(self): async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for", "= 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT =", "args=midi_messages) if not self.is_running: await self.start() async def reschedule(self, *args, **kwargs) -> Optional[int]:", "Event from .bases import ApplicationObject from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ###", "def cue(self, *args, **kwargs) -> int: return self._clock.cue(*args, **kwargs) async def cancel(self, *args,", "= 4 CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM = 7 ### INITIALIZER", "class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs to know start", "moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def", "None ### PRIVATE METHODS ### async def _application_perform_callback(self, clock_context, midi_message): 
await self.application.perform( [midi_message],", "await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def clock(self): return", "EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3", "ParameterObject] = {} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id", "asyncio.gather(*[_._stop() for _ in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES", "self.start() async def reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def", "*args, **kwargs) -> int: return self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs) ->", "transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature),", "self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if ( self.application is None or self.application.status", "def _application_perform_callback(self, clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async def _deserialize(cls,", "DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM = 7 ###", "def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with self.lock([self]): self._tick_event_id", "METHODS ### async def _application_perform_callback(self, clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod", "**kwargs) 
-> int: return self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs) -> Optional[Tuple]:", "\"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] /", "def reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args,", "self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if", "import ApplicationObject from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ###", "CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON", "= {} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id =", "{ \"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self,", "for _ in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ###", "import enum from typing import Dict, Optional, Set, Tuple from supriya.clocks import AsyncTempoClock,", "\"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 /", "@property def is_running(self): return self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event):", "### async def _application_perform_callback(self, clock_context, 
midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async", "VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM = 2", "@classmethod async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return", "return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event):", "-> int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def", "PUBLIC PROPERTIES ### @property def clock(self): return self._clock @property def is_running(self): return self._clock.is_running", "DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM", "import Dict, Optional, Set, Tuple from supriya.clocks import AsyncTempoClock, Moment from ..bases import", "import AsyncTempoClock, Moment from ..bases import Event from .bases import ApplicationObject from .parameters", "await asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await", "await asyncio.gather(*[_._stop() for _ in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC", "SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH", "in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async with self.lock([self]):", "int: return self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs) -> 
Optional[Tuple]: return self._clock.cancel(*args,", "_ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start() async", "async def stop(self): await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for _ in", "def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject] = {} self._clock =", "await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"])", "pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs to", "def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS", "in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start() async def", "set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def", "Set, Tuple from supriya.clocks import AsyncTempoClock, Moment from ..bases import Event from .bases", "self._parameters: Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None),", "4 ### PUBLIC METHODS ### async def cue(self, *args, **kwargs) -> int: return", "### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM", "async def cancel(self, *args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def 
perform(self,", "is None or self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for", "self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator):", "METHODS ### async def cue(self, *args, **kwargs) -> int: return self._clock.cue(*args, **kwargs) async", "*args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs) ->", "return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if ( self.application is None or", "Tuple from supriya.clocks import AsyncTempoClock, Moment from ..bases import Event from .bases import", "\"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return", "@property def clock(self): return self._clock @property def is_running(self): return self._clock.is_running @property def parameters(self):", "0 SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4", "TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs", "### async def cue(self, *args, **kwargs) -> int: return self._clock.cue(*args, **kwargs) async def", "self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs) async", "clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ### async", "= 7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group 
= ParameterGroup() self._parameters: Dict[str,", "from .bases import ApplicationObject from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS", "-> int: return self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs) -> Optional[Tuple]: return", "return { \"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def", "1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ### async def cue(self, *args,", "self, \"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not", "Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group])", "self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages]) ) await", "self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def clock(self): return self._clock @property def", "with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies]) await", "pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs to know start delta moment:", "async def reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self,", "def stop(self): await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies])", "await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start() async def reschedule(self, *args, **kwargs)", "perform(self, midi_messages): if ( self.application is None or 
self.application.status != self.application.Status.REALTIME ): return", "async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies])", "is_running(self): return self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass", "Optional, Set, Tuple from supriya.clocks import AsyncTempoClock, Moment from ..bases import Event from", "class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE = 1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF =", "return self._clock @property def is_running(self): return self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass", "PUBLIC METHODS ### async def cue(self, *args, **kwargs) -> int: return self._clock.cue(*args, **kwargs)", "self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async with self.lock([self]): await", "self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ### async def", "self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages])", "\"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), }, } def _tick_callback(self, clock_context):", "return self._debug_tree( self, \"Perform\", suffix=repr([type(_).__name__ for _ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages)", "await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\":", 
"async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return {", "float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async", "self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def clock(self):", "@dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView needs to know start delta moment: Moment", "Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS ### async", "**kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator,", "set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with self.lock([self]): self._tick_event_id =", "self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async", "self._tick_event_id = None ### PRIVATE METHODS ### async def _application_perform_callback(self, clock_context, midi_message): await", "start(self): async with self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in", "7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject]", "### def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() 
self._parameters: Dict[str, ParameterObject] = {} self._clock", "clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ### async def cue(self, *args, **kwargs) ->", "2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT = 6", "CLIP_PERFORM = 7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters:", "### PRIVATE METHODS ### async def _application_perform_callback(self, clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment", "self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ###", "= await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async", "await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted()) async def", "def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\":", "**kwargs) async def perform(self, midi_messages): if ( self.application is None or self.application.status !=", "for _ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await self.start()", "@dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO:", "= AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE", "[midi_message], 
moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"])", "AsyncTempoClock, Moment from ..bases import Event from .bases import ApplicationObject from .parameters import", "reschedule(self, *args, **kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs)", "def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async", "1 MIDI_PERFORM = 2 DEVICE_NOTE_OFF = 3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5", "self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start() self.application.pubsub.publish(TransportStarted())", "ApplicationObject.__init__(self) self._parameter_group = ParameterGroup() self._parameters: Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock() self._dependencies:", "from ..bases import Event from .bases import ApplicationObject from .parameters import ParameterGroup, ParameterObject", "import asyncio import dataclasses import enum from typing import Dict, Optional, Set, Tuple", "= 6 CLIP_PERFORM = 7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group =", "**kwargs) -> Optional[int]: return self._clock.reschedule(*args, **kwargs) async def schedule(self, *args, **kwargs) -> int:", "### PUBLIC METHODS ### async def cue(self, *args, **kwargs) -> int: return self._clock.cue(*args,", "CLIP_EDIT = 6 CLIP_PERFORM = 7 ### INITIALIZER ### def __init__(self): ApplicationObject.__init__(self) self._parameter_group", "self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies]) await 
self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ###", "return 1 / clock_context.desired_moment.time_signature[1] / 4 ### PUBLIC METHODS ### async def cue(self,", "in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id) self.application.pubsub.publish(TransportStopped()) ### PUBLIC PROPERTIES ### @property def", ".parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE", "async with self.lock([self]): await asyncio.gather(*[_._stop() for _ in self._dependencies]) await self.application.flush() await self.cancel(self._tick_event_id)", "import dataclasses import enum from typing import Dict, Optional, Set, Tuple from supriya.clocks", "async def _application_perform_callback(self, clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async def", "clock_context, midi_message): await self.application.perform( [midi_message], moment=clock_context.current_moment ) @classmethod async def _deserialize(cls, data, transport_object):", "import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE =", "}, } def _tick_callback(self, clock_context): self.application.pubsub.publish(TransportTicked(clock_context.desired_moment)) return 1 / clock_context.desired_moment.time_signature[1] / 4 ###", "cue(self, *args, **kwargs) -> int: return self._clock.cue(*args, **kwargs) async def cancel(self, *args, **kwargs)", "from supriya.clocks import AsyncTempoClock, Moment from ..bases import Event from .bases import ApplicationObject", "class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): # TODO: ClipView", "async def set_time_signature(self, numerator, 
denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with self.lock([self]):", "def schedule(self, *args, **kwargs) -> int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute:", "numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with self.lock([self]): self._tick_event_id = await", "def clock(self): return self._clock @property def is_running(self): return self._clock.is_running @property def parameters(self): return", "_deserialize(cls, data, transport_object): await transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__,", "self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self): async with", "self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass class TransportTicked(Event): #", "self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event):", "suffix=repr([type(_).__name__ for _ in midi_messages]) ) await self.schedule(self._application_perform_callback, args=midi_messages) if not self.is_running: await", "int: return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self,", "self.lock([self]): self._tick_event_id = await self.cue(self._tick_callback) await asyncio.gather(*[_._start() for _ in self._dependencies]) await self._clock.start()", "def _serialize(self): return { \"kind\": 
type(self).__name__, \"spec\": { \"tempo\": self._clock.beats_per_minute, \"time_signature\": list(self._clock.time_signature), },", "( self.application is None or self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self, \"Perform\",", "return self._clock.schedule(*args, **kwargs) async def set_tempo(self, beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator,", "def is_running(self): return self._clock.is_running @property def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass", "Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if ( self.application is None", "self.application.pubsub.publish(TransportStarted()) async def stop(self): await self._clock.stop() async with self.lock([self]): await asyncio.gather(*[_._stop() for _", "if ( self.application is None or self.application.status != self.application.Status.REALTIME ): return self._debug_tree( self,", "= ParameterGroup() self._parameters: Dict[str, ParameterObject] = {} self._clock = AsyncTempoClock() self._dependencies: Set[ApplicationObject] =", "enum from typing import Dict, Optional, Set, Tuple from supriya.clocks import AsyncTempoClock, Moment", "ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0", "beats_per_minute: float): self._clock.change(beats_per_minute=beats_per_minute) async def set_time_signature(self, numerator, denominator): self._clock.change(time_signature=[numerator, denominator]) async def start(self):", "typing import Dict, Optional, Set, Tuple from supriya.clocks import AsyncTempoClock, Moment from ..bases", "AsyncTempoClock() self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS", "await 
transport_object.set_tempo(data[\"spec\"][\"tempo\"]) await transport_object.set_time_signature(*data[\"spec\"][\"time_signature\"]) def _serialize(self): return { \"kind\": type(self).__name__, \"spec\": { \"tempo\":", "cancel(self, *args, **kwargs) -> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if", "3 DEVICE_NOTE_ON = 4 CLIP_LAUNCH = 5 CLIP_EDIT = 6 CLIP_PERFORM = 7", "-> Optional[Tuple]: return self._clock.cancel(*args, **kwargs) async def perform(self, midi_messages): if ( self.application is", "self._dependencies: Set[ApplicationObject] = set() self._mutate(slice(None), [self._parameter_group]) self._tick_event_id = None ### PRIVATE METHODS ###", "class Transport(ApplicationObject): ### CLASS VARIABLES ### class EventType(enum.IntEnum): CHANGE = 0 SCHEDULE =", "clock(self): return self._clock @property def is_running(self): return self._clock.is_running @property def parameters(self): return self._parameters", "def parameters(self): return self._parameters @dataclasses.dataclass class TransportStarted(Event): pass @dataclasses.dataclass class TransportStopped(Event): pass @dataclasses.dataclass", ".bases import ApplicationObject from .parameters import ParameterGroup, ParameterObject class Transport(ApplicationObject): ### CLASS VARIABLES" ]
[ "import admin from .models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model = Phone", "42 default_zoom = 2 list_display = ('names', 'fulladdress', 'comments') inlines = [PhoneInline, EmailInline]", "Email class PhoneInline(admin.TabularInline): model = Phone extra = 1 class EmailInline(admin.TabularInline): model =", "extra = 1 class EmailInline(admin.TabularInline): model = Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin):", "from django.contrib.gis import admin from .models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model", ".models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model = Phone extra = 1", "= 2 # default_lat = 42 default_zoom = 2 list_display = ('names', 'fulladdress',", "Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model = Phone extra = 1 class EmailInline(admin.TabularInline):", "# default_lat = 42 default_zoom = 2 list_display = ('names', 'fulladdress', 'comments') inlines", "default_lat = 6000000 # default_lon = 2 # default_lat = 42 default_zoom =", "EmailInline(admin.TabularInline): model = Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat", "Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000 #", "<gh_stars>0 from django.contrib.gis import admin from .models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline):", "HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000 # default_lon = 2 # default_lat", "= Phone extra = 1 class EmailInline(admin.TabularInline): model = Email extra = 1", "PhoneInline(admin.TabularInline): model = Phone extra = 1 class EmailInline(admin.TabularInline): model = Email extra", "= 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000 # default_lon =", "default_lat = 42 default_zoom = 2 list_display = ('names', 'fulladdress', 'comments') inlines =", 
"django.contrib.gis import admin from .models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model =", "import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model = Phone extra = 1 class", "= Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000", "# default_lon = 2 # default_lat = 42 default_zoom = 2 list_display =", "default_zoom = 2 list_display = ('names', 'fulladdress', 'comments') inlines = [PhoneInline, EmailInline] admin.site.register(Hivernant,", "2 # default_lat = 42 default_zoom = 2 list_display = ('names', 'fulladdress', 'comments')", "1 class EmailInline(admin.TabularInline): model = Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon =", "model = Phone extra = 1 class EmailInline(admin.TabularInline): model = Email extra =", "default_lon = 300000 default_lat = 6000000 # default_lon = 2 # default_lat =", "admin from .models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model = Phone extra", "class EmailInline(admin.TabularInline): model = Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000", "1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000 # default_lon = 2", "= 300000 default_lat = 6000000 # default_lon = 2 # default_lat = 42", "class PhoneInline(admin.TabularInline): model = Phone extra = 1 class EmailInline(admin.TabularInline): model = Email", "extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000 # default_lon", "= 1 class EmailInline(admin.TabularInline): model = Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon", "= 6000000 # default_lon = 2 # default_lat = 42 default_zoom = 2", "class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat = 6000000 # default_lon = 2 #", "Phone extra = 1 class EmailInline(admin.TabularInline): model = Email extra = 1 class", "default_lon = 2 # 
default_lat = 42 default_zoom = 2 list_display = ('names',", "= 2 list_display = ('names', 'fulladdress', 'comments') inlines = [PhoneInline, EmailInline] admin.site.register(Hivernant, HivernantAdmin)", "300000 default_lat = 6000000 # default_lon = 2 # default_lat = 42 default_zoom", "Phone, Email class PhoneInline(admin.TabularInline): model = Phone extra = 1 class EmailInline(admin.TabularInline): model", "6000000 # default_lon = 2 # default_lat = 42 default_zoom = 2 list_display", "= 42 default_zoom = 2 list_display = ('names', 'fulladdress', 'comments') inlines = [PhoneInline,", "from .models import Hivernant, Phone, Email class PhoneInline(admin.TabularInline): model = Phone extra =", "model = Email extra = 1 class HivernantAdmin(admin.OSMGeoAdmin): default_lon = 300000 default_lat =" ]
[ "json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60", "start as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$", "context): rs=talk(request.sender, request.message) recips = [] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return", "import grpc import requests import json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import", "ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message) recips = [] for r in", "24 \"\"\" $ start as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py", "self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. artifacts servant started on\",", "# we enter a never-ending loop that waits for # data and runs", "SetSlot(self, request, context): return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request,", "Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 * 24 \"\"\" $ start as", "as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$ ./query.py", "'{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json()", "sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 * 24 \"\"\" $", "a never-ending loop that waits for # data and runs callbacks whenever necessary.", "self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return 
blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def", "never-ending loop that waits for # data and runs callbacks whenever necessary. print(\"", "return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port", "started on\", self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close()", "$ start as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\"", "-m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$ ./query.py talk \"/joke\" def talk(sender,", "} # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data))", "if __name__ == '__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main #", "blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. 
artifacts", "__init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class", "in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request, context)", "import requests import json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver", "#$ ./query.py talk \"/joke\" def talk(sender, a_str): headers = { 'content-type': 'application/json', }", "_ONE_DAY_IN_SECONDS = 60 * 60 * 24 \"\"\" $ start as # or:", "blocking=True): self.server.start() print(\".. artifacts servant started on\", self.port) if blocking: try: while True:", "else: # self.receiver.serve('anonymous.info') if __name__ == '__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic", "for # data and runs callbacks whenever necessary. print(\" [*] Waiting for messages", "blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info')", "enter a never-ending loop that waits for # data and runs callbacks whenever", "__name__ == '__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main # #", "import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 * 24 \"\"\" $ start", "that waits for # data and runs callbacks whenever necessary. 
print(\" [*] Waiting", "[] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context):", "blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 * 24", "'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers,", "print(\".. artifacts servant started on\", self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except", "from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 * 24 \"\"\"", "== '__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main # # message", "loop.create_task(main(loop)) # we enter a never-ending loop that waits for # data and", "return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts", "serve(self, blocking=True): self.server.start() print(\".. artifacts servant started on\", self.port) if blocking: try: while", "# # message receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter", "'__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main # # message receiver", "request, context): rs=talk(request.sender, request.message) recips = [] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text']))", "message receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a never-ending", "necessary. 
print(\" [*] Waiting for messages ..\") # rpc and message sender s=Artifacts()", "context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return", "* 60 * 24 \"\"\" $ start as # or: python -m sagas.ofbiz.rpc_artifacts", "headers = { 'content-type': 'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str}", "{ 'content-type': 'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response =", "\"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class", "BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 * 24 \"\"\" $ start as #", "artifacts servant started on\", self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt:", "KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if __name__ == '__main__': # import", "# from sagas.tests.bus.aio.receive_logs_topic import main # # message receiver # loop = asyncio.get_event_loop()", "self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if __name__ == '__main__': # import asyncio #", "text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request, context) def __init__(self, artifacts):", "port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self,", "recips = [] for r in rs: 
recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self,", "'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def", "import futures import time import grpc import requests import json import blueprints_pb2_grpc import", "r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request,", "def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver()", "self.receiver.serve('anonymous.info') if __name__ == '__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main", "self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. artifacts servant started on\", self.port) if blocking:", "= asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a never-ending loop that waits for", "import time import grpc import requests import json import blueprints_pb2_grpc import blueprints_pb2 from", "def SetSlot(self, request, context): return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self,", "\"hi\" #$ ./query.py talk \"/joke\" def talk(sender, a_str): headers = { 'content-type': 'application/json',", "self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. 
artifacts servant started on\", self.port)", "request, context): return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context):", "rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request, context) def", "whenever necessary. print(\" [*] Waiting for messages ..\") # rpc and message sender", "self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else:", "headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender,", "request.message) recips = [] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def", "# else: # self.receiver.serve('anonymous.info') if __name__ == '__main__': # import asyncio # from", "and runs callbacks whenever necessary. print(\" [*] Waiting for messages ..\") # rpc", "./query.py talk \"hi\" #$ ./query.py talk \"/joke\" def talk(sender, a_str): headers = {", "self.server.start() print(\".. 
artifacts servant started on\", self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS)", "= { 'content-type': 'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response", "data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message)", "= [] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request,", "receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a never-ending loop", "= grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start()", "60 * 60 * 24 \"\"\" $ start as # or: python -m", "talk \"/joke\" def talk(sender, a_str): headers = { 'content-type': 'application/json', } # data", "__init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def", "runs callbacks whenever necessary. 
print(\" [*] Waiting for messages ..\") # rpc and", "\"/joke\" def talk(sender, a_str): headers = { 'content-type': 'application/json', } # data =", "'content-type': 'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook',", "return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message) recips = []", "\"\"\" $ start as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk", "talk(sender, a_str): headers = { 'content-type': 'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}'", "def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self,", "blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port)", "python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$ ./query.py talk \"/joke\" def", "= requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request,", "self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True):", "True: time.sleep(_ONE_DAY_IN_SECONDS) 
except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if __name__ ==", "blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def", "artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object):", "grpc import requests import json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard,", "asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a never-ending loop that waits for #", "recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return super().SetSlot(request, context) def __init__(self,", "def serve(self, blocking=True): self.server.start() print(\".. artifacts servant started on\", self.port) if blocking: try:", "context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server =", "def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\")", "rs=talk(request.sender, request.message) recips = [] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips)", "self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. 
artifacts servant", "response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self,", "a_str): headers = { 'content-type': 'application/json', } # data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender,", "Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'):", "data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text)", "# data and runs callbacks whenever necessary. print(\" [*] Waiting for messages ..\")", "[*] Waiting for messages ..\") # rpc and message sender s=Artifacts() s.serve() #", "print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message) recips =", "\"\"\" #$ ./query.py talk \"hi\" #$ ./query.py talk \"/joke\" def talk(sender, a_str): headers", "# or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$ ./query.py talk", "%s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self),", "./query.py talk \"/joke\" def talk(sender, a_str): headers = { 'content-type': 'application/json', } #", "sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$ ./query.py talk \"/joke\" def talk(sender, a_str):", "waits for # 
data and runs callbacks whenever necessary. print(\" [*] Waiting for", "# message receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a", "Talk(self, request, context): rs=talk(request.sender, request.message) recips = [] for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'],", "or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$ ./query.py talk \"hi\" #$ ./query.py talk \"/joke\"", "grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\"..", "import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main # # message receiver # loop", "context): return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get", "request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server", "self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() # self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. artifacts servant started", "futures import time import grpc import requests import json import blueprints_pb2_grpc import blueprints_pb2", "data and runs callbacks whenever necessary. 
print(\" [*] Waiting for messages ..\") #", "Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard() #", "try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if", "callbacks whenever necessary. print(\" [*] Waiting for messages ..\") # rpc and message", "60 * 24 \"\"\" $ start as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\"", "# import asyncio # from sagas.tests.bus.aio.receive_logs_topic import main # # message receiver #", "blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60", "import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 *", "if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: #", "for r in rs: recips.append(blueprints_pb2.BotRecipient(id=r['recipient_id'], text=r['text'])) return blueprints_pb2.BotResponse(recipients=recips) def SetSlot(self, request, context): return", "concurrent import futures import time import grpc import requests import json import blueprints_pb2_grpc", "<reponame>samlet/stack from concurrent import futures import time import grpc import requests import json", "main # # message receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we", "# self.receiver=BlackboardReceiver() def serve(self, blocking=True): self.server.start() print(\".. 
artifacts servant started on\", self.port) if", "* 24 \"\"\" $ start as # or: python -m sagas.ofbiz.rpc_artifacts \"\"\" #$", "# loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a never-ending loop that", "def talk(sender, a_str): headers = { 'content-type': 'application/json', } # data = '{'+'\"message\":", "loop that waits for # data and runs callbacks whenever necessary. print(\" [*]", "sagas.tests.bus.aio.receive_logs_topic import main # # message receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop))", "time import grpc import requests import json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard", "# loop.create_task(main(loop)) # we enter a never-ending loop that waits for # data", "class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server) self.port=port self.server.add_insecure_port(port) self.blackboard=Blackboard()", "self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) blueprints_pb2_grpc.add_ArtifactServiceServicer_to_server(ArtifactService(self), self.server)", "requests import json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS", "print('get %s'%request) self.artifacts.blackboard.send(request.message) return blueprints_pb2.PingResponse(response=\"world\") class Artifacts(object): def __init__(self, port='0.0.0.0:20051'): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))", "while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # 
self.receiver.serve('anonymous.info') if __name__", "Waiting for messages ..\") # rpc and message sender s=Artifacts() s.serve() # loop.run_forever()", "return super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request)", "talk \"hi\" #$ ./query.py talk \"/joke\" def talk(sender, a_str): headers = { 'content-type':", "time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if __name__ == '__main__':", "requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context):", "# data = '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) #", "from concurrent import futures import time import grpc import requests import json import", "= 60 * 60 * 24 \"\"\" $ start as # or: python", "import json import blueprints_pb2_grpc import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS =", "print(\" [*] Waiting for messages ..\") # rpc and message sender s=Artifacts() s.serve()", "except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if __name__ == '__main__': #", "servant started on\", self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0)", "class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message) recips = [] for r", "= '{'+'\"message\": \"{}\"'.format(a_str)+'}' data={'sender':sender, 'message':a_str} response = 
requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return", "def Talk(self, request, context): rs=talk(request.sender, request.message) recips = [] for r in rs:", "import blueprints_pb2 from sagas.ofbiz.blackboard import Blackboard, BlackboardReceiver _ONE_DAY_IN_SECONDS = 60 * 60 *", "from sagas.tests.bus.aio.receive_logs_topic import main # # message receiver # loop = asyncio.get_event_loop() #", "import main # # message receiver # loop = asyncio.get_event_loop() # loop.create_task(main(loop)) #", "we enter a never-ending loop that waits for # data and runs callbacks", "self.server.stop(0) self.blackboard.close() # else: # self.receiver.serve('anonymous.info') if __name__ == '__main__': # import asyncio", "on\", self.port) if blocking: try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: self.server.stop(0) self.blackboard.close() #", "data={'sender':sender, 'message':a_str} response = requests.post('http://localhost:5005/webhooks/rest/webhook', headers=headers, data=json.dumps(data)) # print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer):", "# self.receiver.serve('anonymous.info') if __name__ == '__main__': # import asyncio # from sagas.tests.bus.aio.receive_logs_topic import", "loop = asyncio.get_event_loop() # loop.create_task(main(loop)) # we enter a never-ending loop that waits", "#$ ./query.py talk \"hi\" #$ ./query.py talk \"/joke\" def talk(sender, a_str): headers =", "# print(response.text) return response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message) recips", "response.json() class ArtifactService(blueprints_pb2_grpc.ArtifactServiceServicer): def Talk(self, request, context): rs=talk(request.sender, request.message) recips = [] for", "asyncio # from sagas.tests.bus.aio.receive_logs_topic import main # 
# message receiver # loop =", "super().SetSlot(request, context) def __init__(self, artifacts): self.artifacts=artifacts def Ping(self, request, context): print('get %s'%request) self.artifacts.blackboard.send(request.message)" ]
[ "- Beginner/1789.py # URI Online Judge 1789 while True: try: N = int(input())", "# URI Online Judge 1789 while True: try: N = int(input()) entrada =", "i in input().split()] if max(entrada) < 10: print(1) elif max(entrada) >= 10 and", "Online Judge 1789 while True: try: N = int(input()) entrada = [int(i) for", "and max(entrada) < 20: print(2) elif max(entrada) >= 20: print(3) except EOFError: break", "URI Online Judge 1789 while True: try: N = int(input()) entrada = [int(i)", "[int(i) for i in input().split()] if max(entrada) < 10: print(1) elif max(entrada) >=", "N = int(input()) entrada = [int(i) for i in input().split()] if max(entrada) <", "= [int(i) for i in input().split()] if max(entrada) < 10: print(1) elif max(entrada)", "max(entrada) < 10: print(1) elif max(entrada) >= 10 and max(entrada) < 20: print(2)", "1789 while True: try: N = int(input()) entrada = [int(i) for i in", "while True: try: N = int(input()) entrada = [int(i) for i in input().split()]", "10 and max(entrada) < 20: print(2) elif max(entrada) >= 20: print(3) except EOFError:", "Beginner/1789.py # URI Online Judge 1789 while True: try: N = int(input()) entrada", "if max(entrada) < 10: print(1) elif max(entrada) >= 10 and max(entrada) < 20:", "input().split()] if max(entrada) < 10: print(1) elif max(entrada) >= 10 and max(entrada) <", "= int(input()) entrada = [int(i) for i in input().split()] if max(entrada) < 10:", "<filename>1 - Beginner/1789.py # URI Online Judge 1789 while True: try: N =", "in input().split()] if max(entrada) < 10: print(1) elif max(entrada) >= 10 and max(entrada)", "< 10: print(1) elif max(entrada) >= 10 and max(entrada) < 20: print(2) elif", "elif max(entrada) >= 10 and max(entrada) < 20: print(2) elif max(entrada) >= 20:", "for i in input().split()] if max(entrada) < 10: print(1) elif max(entrada) >= 10", "print(1) elif max(entrada) >= 10 and max(entrada) < 20: print(2) elif max(entrada) >=", ">= 10 and max(entrada) < 20: print(2) elif max(entrada) >= 20: print(3) 
except", "True: try: N = int(input()) entrada = [int(i) for i in input().split()] if", "int(input()) entrada = [int(i) for i in input().split()] if max(entrada) < 10: print(1)", "Judge 1789 while True: try: N = int(input()) entrada = [int(i) for i", "max(entrada) >= 10 and max(entrada) < 20: print(2) elif max(entrada) >= 20: print(3)", "entrada = [int(i) for i in input().split()] if max(entrada) < 10: print(1) elif", "try: N = int(input()) entrada = [int(i) for i in input().split()] if max(entrada)", "10: print(1) elif max(entrada) >= 10 and max(entrada) < 20: print(2) elif max(entrada)" ]
[ "logging import psycopg2 from tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user():", "\"territory_name\": data[13], \"state_code\": data[14], } def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor:", "def __init__(self, host, database, user, password, port): self._connection = psycopg2.connect( dbname=database, user=user, password=password,", "typing import Generator import os import logging import psycopg2 from tasks import DatabaseInterface", "data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8],", "gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes INNER", "territories ON territories.id = gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes", "processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED, {\"id\": id, \"file_checksum\": gazette_file_checksum}, ) self._connection.commit()", "in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\") def set_gazette_as_processed(self,", "\"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14], } def get_pending_gazettes(self) -> Generator: with self._connection.cursor()", "os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"]", "territories.id = gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed", "return os.environ[\"POSTGRES_USER\"] def 
get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return", "from tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def", "= %(file_checksum)s;\"\"\" def __init__(self, host, database, user, password, port): self._connection = psycopg2.connect( dbname=database,", "-> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES", "return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT", "gazettes to be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking", "cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes", "def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class", "<filename>database/postgresql.py from typing import Generator import os import logging import psycopg2 from tasks", "from typing import Generator import os import logging import psycopg2 from tasks import", "os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), )", "\"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], 
\"file_checksum\": data[6], \"file_path\":", "port): self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port ) def format_gazette_data(self, data):", "id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as", "format_gazette_data(self, data): return { \"id\": data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\":", "get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power,", "str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED,", "\"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\":", "get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL(", "def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def", "-> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED, {\"id\":", "get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date,", "gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, 
gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as", "SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at,", "return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() ->", "is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed = True WHERE id =", "return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(),", "territory_name, territories.state_code FROM gazettes INNER JOIN territories ON territories.id = gazettes.territory_id WHERE processed", "\"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\":", "data[13], \"state_code\": data[14], } def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES)", "get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host():", "DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES =", "\"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14], } def get_pending_gazettes(self) -> Generator:", "int, 
gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor:", "data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14], } def get_pending_gazettes(self) ->", "as processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED, {\"id\": id, \"file_checksum\": gazette_file_checksum}, )", "yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum:", "%(file_checksum)s;\"\"\" def __init__(self, host, database, user, password, port): self._connection = psycopg2.connect( dbname=database, user=user,", "gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor: cursor.execute(", "return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(),", "Generator import os import logging import psycopg2 from tasks import DatabaseInterface def get_database_name():", "\"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14], } def get_pending_gazettes(self)", "get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor:", "more gazettes to be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None:", "password=password, host=host, port=port ) def format_gazette_data(self, data): return { \"id\": data[0], \"source_text\": data[1],", "user=user, password=password, host=host, port=port ) def format_gazette_data(self, data): return { \"id\": 
data[0], \"source_text\":", "gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code", "data[14], } def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for", "be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as", ") class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum,", "return { \"id\": data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\":", "set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor()", "os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface:", "-> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data)", "get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(),", "= gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed =", "import DatabaseInterface def get_database_name(): return 
os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return", "def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(),", "INNER JOIN territories ON territories.id = gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED =", "get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text,", "cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to", "os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"]", "gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM", "\"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\":", "as territory_name, territories.state_code FROM gazettes INNER JOIN territories ON territories.id = gazettes.territory_id WHERE", "= psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port ) def format_gazette_data(self, data): return {", "import logging import psycopg2 from tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def", "host=host, port=port ) def format_gazette_data(self, data): return 
{ \"id\": data[0], \"source_text\": data[1], \"date\":", "} def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data", "get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number,", "to be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum})", "gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes INNER JOIN territories ON territories.id", "__init__(self, host, database, user, password, port): self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host,", "gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed = True", "PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url,", "get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface()", "return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return", "\"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\":", "psycopg2.connect( dbname=database, user=user, password=password, host=host, 
port=port ) def format_gazette_data(self, data): return { \"id\":", "data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14],", "= %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database, user, password, port):", "\"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\":", "data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11],", "data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9],", "\"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id,", "gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes INNER JOIN territories", "import Generator import os import logging import psycopg2 from tasks import DatabaseInterface def", "WHERE id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database, user,", "= True WHERE id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host,", "gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes INNER JOIN territories ON territories.id =", "id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database, user, password,", "dbname=database, user=user, password=password, host=host, port=port ) def format_gazette_data(self, data): return { \"id\": data[0],", 
"UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed = True WHERE id = %(id)s AND", "data): return { \"id\": data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4],", "password, port): self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port ) def format_gazette_data(self,", "data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14], } def", "gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes", "data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6],", "get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port():", "gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes INNER JOIN territories ON", "DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"]", "def format_gazette_data(self, data): return { \"id\": data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3],", "import psycopg2 from tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return", "logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\") def set_gazette_as_processed(self, id: int,", 
"gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name", "territories.name as territory_name, territories.state_code FROM gazettes INNER JOIN territories ON territories.id = gazettes.territory_id", "self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port ) def format_gazette_data(self, data): return", "PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id,", "\"state_code\": data[14], } def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query)", "import os import logging import psycopg2 from tasks import DatabaseInterface def get_database_name(): return", "def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in", "get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition,", "logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED, {\"id\": id, \"file_checksum\":", "os import logging import psycopg2 from tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"]", "None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED, 
{\"id\": id,", "data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10],", "data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13],", "%(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database, user, password, port): self._connection", "gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed,", "{id}({gazette_file_checksum}) as processed\") with self._connection.cursor() as cursor: cursor.execute( self.UPDATE_GAZETTE_AS_PROCESSED, {\"id\": id, \"file_checksum\": gazette_file_checksum},", "gazettes SET processed = True WHERE id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\"", "logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be", "\"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12], \"territory_name\": data[13], \"state_code\": data[14], }", "territories.state_code FROM gazettes INNER JOIN territories ON territories.id = gazettes.territory_id WHERE processed is", "user, password, port): self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port ) def", "{ \"id\": data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5],", "data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\": data[12],", "data[11], \"processed\": data[12], \"territory_name\": 
data[13], \"state_code\": data[14], } def get_pending_gazettes(self) -> Generator: with", "processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed = True WHERE id", "def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def", "\"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\": data[11], \"processed\":", "False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed = True WHERE id = %(id)s", "tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password():", "file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database, user, password, port): self._connection = psycopg2.connect(", "class PostgreSQL(DatabaseInterface): SELECT_PENDING_GAZETTES = \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path,", "SET processed = True WHERE id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def", "def get_database_host(): return os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return", "create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(), get_database_user(), get_database_password(), get_database_port(), ) class PostgreSQL(DatabaseInterface):", "= \"\"\"UPDATE gazettes SET processed = True WHERE id = %(id)s AND file_checksum", "as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield 
self.format_gazette_data(gazette_data) logging.debug(\"No more", "\"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7], \"file_url\": data[8], \"scraped_at\": data[9], \"created_at\": data[10], \"territory_id\":", "FROM gazettes INNER JOIN territories ON territories.id = gazettes.territory_id WHERE processed is False;\"\"\"", "self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No", "psycopg2 from tasks import DatabaseInterface def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"]", "processed = True WHERE id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self,", "AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database, user, password, port): self._connection =", "\"\"\"UPDATE gazettes SET processed = True WHERE id = %(id)s AND file_checksum =", "with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data)", "logging.debug(\"No more gazettes to be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) ->", "self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str)", "= \"\"\"SELECT gazettes.id, gazettes.source_text, gazettes.date, gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at,", "gazettes INNER JOIN territories ON territories.id = gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED", "\"id\": data[0], \"source_text\": data[1], 
\"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\":", "gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\") def", "gazettes.edition_number, gazettes.is_extra_edition, gazettes.power, gazettes.file_checksum, gazettes.file_path, gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name,", "host, database, user, password, port): self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port", "def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\") with", "def get_database_name(): return os.environ[\"POSTGRES_DB\"] def get_database_user(): return os.environ[\"POSTGRES_USER\"] def get_database_password(): return os.environ[\"POSTGRES_PASSWORD\"] def", "os.environ[\"POSTGRES_HOST\"] def get_database_port(): return os.environ[\"POSTGRES_PORT\"] def create_database_interface() -> DatabaseInterface: return PostgreSQL( get_database_host(), get_database_name(),", "processed\") def set_gazette_as_processed(self, id: int, gazette_file_checksum: str) -> None: logging.debug(f\"Marking {id}({gazette_file_checksum}) as processed\")", ") def format_gazette_data(self, data): return { \"id\": data[0], \"source_text\": data[1], \"date\": data[2], \"edition_number\":", "Generator: with self._connection.cursor() as cursor: cursor.execute(self.SELECT_PENDING_GAZETTES) logging.debug(cursor.query) for gazette_data in cursor: logging.debug(gazette_data) yield", "JOIN territories ON territories.id = gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE", "WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET processed = True WHERE", "data[1], 
\"date\": data[2], \"edition_number\": data[3], \"is_extra_edition\": data[4], \"power\": data[5], \"file_checksum\": data[6], \"file_path\": data[7],", "database, user, password, port): self._connection = psycopg2.connect( dbname=database, user=user, password=password, host=host, port=port )", "cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\") def set_gazette_as_processed(self, id:", "gazettes.file_url, gazettes.scraped_at, gazettes.created_at, gazettes.territory_id, gazettes.processed, territories.name as territory_name, territories.state_code FROM gazettes INNER JOIN", "port=port ) def format_gazette_data(self, data): return { \"id\": data[0], \"source_text\": data[1], \"date\": data[2],", "True WHERE id = %(id)s AND file_checksum = %(file_checksum)s;\"\"\" def __init__(self, host, database,", "for gazette_data in cursor: logging.debug(gazette_data) yield self.format_gazette_data(gazette_data) logging.debug(\"No more gazettes to be processed\")", "ON territories.id = gazettes.territory_id WHERE processed is False;\"\"\" UPDATE_GAZETTE_AS_PROCESSED = \"\"\"UPDATE gazettes SET", "data[12], \"territory_name\": data[13], \"state_code\": data[14], } def get_pending_gazettes(self) -> Generator: with self._connection.cursor() as" ]
[ "# # require: # https://github.com/edburnett/twitter-text-python # # download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump", "parser as date_parser # read csv to Dict with open('realdonaldtrump.csv', 'r') as f:", "f: reader = csv.DictReader(f, delimiter = ',') data = list(reader) # write to", "= parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users item['reply'] = result.reply item['tweet_time'] =", "item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json file (more fields) with open('realdonaldtrump_more.json', 'w')", "# https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv from ttp import ttp from dateutil", "file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv from ttp import ttp", "# https://github.com/edburnett/twitter-text-python # # download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json", "= result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json file (more fields) with", "(same fields as csv) with open('realdonaldtrump.json', 'w') as f: for item in data:", "data: result = parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users item['reply'] = result.reply", "twitter csv process # write by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python #", "to Dict with open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f, delimiter = ',')", "# get more info from text message parser = ttp.Parser() for item in", "+ '\\n') # get more info from text message parser = ttp.Parser() for", "# write by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python # # download csv", "json file (same fields as csv) with open('realdonaldtrump.json', 'w') as f: for item", "csv.DictReader(f, 
delimiter = ',') data = list(reader) # write to json file (same", "json file (more fields) with open('realdonaldtrump_more.json', 'w') as f: for item in data:", "csv) with open('realdonaldtrump.json', 'w') as f: for item in data: f.write(json.dumps(item) + '\\n')", "item['users'] = result.users item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json", "item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json file (more fields)", "in data: result = parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users item['reply'] =", "date_parser # read csv to Dict with open('realdonaldtrump.csv', 'r') as f: reader =", "read csv to Dict with open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f, delimiter", "item['tags'] = result.tags item['users'] = result.users item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) #", "csv from ttp import ttp from dateutil import parser as date_parser # read", "Dict with open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f, delimiter = ',') data", "fields) with open('realdonaldtrump_more.json', 'w') as f: for item in data: f.write(json.dumps(item) + '\\n')", "fields as csv) with open('realdonaldtrump.json', 'w') as f: for item in data: f.write(json.dumps(item)", "= ',') data = list(reader) # write to json file (same fields as", "import csv from ttp import ttp from dateutil import parser as date_parser #", "delimiter = ',') data = list(reader) # write to json file (same fields", "# write to json file (more fields) with open('realdonaldtrump_more.json', 'w') as f: for", "reader = csv.DictReader(f, delimiter = ',') data = list(reader) # write to json", "message parser = ttp.Parser() for item in data: result = parser.parse(item['text']) item['tags'] =", "list(reader) # write to json file (same fields as csv) with open('realdonaldtrump.json', 'w')", "item in 
data: f.write(json.dumps(item) + '\\n') # get more info from text message", "with open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f, delimiter = ',') data =", "result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json file (more fields) with open('realdonaldtrump_more.json',", "@jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python # # download csv file from: #", "from ttp import ttp from dateutil import parser as date_parser # read csv", "f.write(json.dumps(item) + '\\n') # get more info from text message parser = ttp.Parser()", "# import json import csv from ttp import ttp from dateutil import parser", "text message parser = ttp.Parser() for item in data: result = parser.parse(item['text']) item['tags']", "# download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv from", "https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv from ttp import ttp from dateutil import", "data = list(reader) # write to json file (same fields as csv) with", "info from text message parser = ttp.Parser() for item in data: result =", "parser = ttp.Parser() for item in data: result = parser.parse(item['text']) item['tags'] = result.tags", "parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at']))", "get more info from text message parser = ttp.Parser() for item in data:", "process # write by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python # # download", "= ttp.Parser() for item in data: result = parser.parse(item['text']) item['tags'] = result.tags item['users']", "item in data: result = parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users item['reply']", "= list(reader) # write to json file (same fields as csv) with 
open('realdonaldtrump.json',", "as f: for item in data: f.write(json.dumps(item) + '\\n') # get more info", "import json import csv from ttp import ttp from dateutil import parser as", "# # download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv", "ttp from dateutil import parser as date_parser # read csv to Dict with", "csv process # write by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python # #", "result.users item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json file (more", "write to json file (more fields) with open('realdonaldtrump_more.json', 'w') as f: for item", "# read csv to Dict with open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f,", "result.tags item['users'] = result.users item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to", "f: for item in data: f.write(json.dumps(item) + '\\n') # get more info from", "in data: f.write(json.dumps(item) + '\\n') # get more info from text message parser", "= result.tags item['users'] = result.users item['reply'] = result.reply item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write", "# write to json file (same fields as csv) with open('realdonaldtrump.json', 'w') as", "for item in data: f.write(json.dumps(item) + '\\n') # get more info from text", "write to json file (same fields as csv) with open('realdonaldtrump.json', 'w') as f:", "write by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python # # download csv file", "by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python # # download csv file from:", "',') data = list(reader) # write to json file (same fields as csv)", "for item in data: result = parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users", "= result.users item['reply'] = result.reply 
item['tweet_time'] = str(date_parser.parse(item['created_at'])) # write to json file", "https://github.com/edburnett/twitter-text-python # # download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import", "result = parser.parse(item['text']) item['tags'] = result.tags item['users'] = result.users item['reply'] = result.reply item['tweet_time']", "file (same fields as csv) with open('realdonaldtrump.json', 'w') as f: for item in", "csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv from ttp import", "'r') as f: reader = csv.DictReader(f, delimiter = ',') data = list(reader) #", "ttp import ttp from dateutil import parser as date_parser # read csv to", "as date_parser # read csv to Dict with open('realdonaldtrump.csv', 'r') as f: reader", "open('realdonaldtrump.json', 'w') as f: for item in data: f.write(json.dumps(item) + '\\n') # get", "'\\n') # get more info from text message parser = ttp.Parser() for item", "import parser as date_parser # read csv to Dict with open('realdonaldtrump.csv', 'r') as", "= csv.DictReader(f, delimiter = ',') data = list(reader) # write to json file", "csv to Dict with open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f, delimiter =", "as f: reader = csv.DictReader(f, delimiter = ',') data = list(reader) # write", "str(date_parser.parse(item['created_at'])) # write to json file (more fields) with open('realdonaldtrump_more.json', 'w') as f:", "file (more fields) with open('realdonaldtrump_more.json', 'w') as f: for item in data: f.write(json.dumps(item)", "as csv) with open('realdonaldtrump.json', 'w') as f: for item in data: f.write(json.dumps(item) +", "# twitter csv process # write by @jiyang_viz # # require: # https://github.com/edburnett/twitter-text-python", "open('realdonaldtrump.csv', 'r') as f: reader = csv.DictReader(f, delimiter = ',') data = list(reader)", "dateutil import parser 
as date_parser # read csv to Dict with open('realdonaldtrump.csv', 'r')", "ttp.Parser() for item in data: result = parser.parse(item['text']) item['tags'] = result.tags item['users'] =", "to json file (same fields as csv) with open('realdonaldtrump.json', 'w') as f: for", "# # twitter csv process # write by @jiyang_viz # # require: #", "data: f.write(json.dumps(item) + '\\n') # get more info from text message parser =", "more info from text message parser = ttp.Parser() for item in data: result", "to json file (more fields) with open('realdonaldtrump_more.json', 'w') as f: for item in", "from dateutil import parser as date_parser # read csv to Dict with open('realdonaldtrump.csv',", "json import csv from ttp import ttp from dateutil import parser as date_parser", "require: # https://github.com/edburnett/twitter-text-python # # download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import", "with open('realdonaldtrump.json', 'w') as f: for item in data: f.write(json.dumps(item) + '\\n') #", "'w') as f: for item in data: f.write(json.dumps(item) + '\\n') # get more", "= str(date_parser.parse(item['created_at'])) # write to json file (more fields) with open('realdonaldtrump_more.json', 'w') as", "# require: # https://github.com/edburnett/twitter-text-python # # download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump #", "(more fields) with open('realdonaldtrump_more.json', 'w') as f: for item in data: f.write(json.dumps(item) +", "from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv from ttp import ttp from", "import ttp from dateutil import parser as date_parser # read csv to Dict", "from text message parser = ttp.Parser() for item in data: result = parser.parse(item['text'])", "download csv file from: # https://github.com/bpb27/political_twitter_archive/tree/master/realdonaldtrump # import json import csv 
from ttp" ]
[ "return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\",", "json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True) return formatJsonresp print(\"Running the", "LastToken and LastTokenTime > now - TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload", "\"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict = { \"data\": { \"type\": \"interaction\",", "= False TokenLifespan = 58 * 60 def BearerTokenGrab(): global LastToken global LastTokenTime", "UserID }, \"input\": input } } } response = requests.post(url, headers=headers, json=payloaddict) jsonResponse", "config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\":", "\"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\":", "LastToken = jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token'] #Creates a User ID for", "\"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers", "\"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers)", "> now - TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" +", "requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token'] LastTokenTime = now return", "= { \"Content-Type\": \"application/json\", 
\"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict = {", "{ \"id\": UserID }, \"input\": input } } } response = requests.post(url, headers=headers,", "= jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token'] #Creates a User ID for the", "+ \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url,", "data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\"", "{ \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict = { \"data\":", "TokenLifespan = 58 * 60 def BearerTokenGrab(): global LastToken global LastTokenTime now =", "* 60 def BearerTokenGrab(): global LastToken global LastTokenTime now = time.time() if LastToken", "current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = {", "= \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \"", "headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token'] #Creates", "def BearerTokenGrab(): global LastToken global LastTokenTime now = time.time() if LastToken and LastTokenTime", "\"id\": UserID }, \"input\": input } } } response = requests.post(url, headers=headers, json=payloaddict)", "config import time #Creating and getting my bearer token LastToken = False LastTokenTime", "import requests import json from decouple import config import time #Creating and getting", "input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" 
+", "\"type\": \"interaction\", \"attributes\": { \"user\": { \"id\": UserID }, \"input\": input } }", "LastToken = False LastTokenTime = False TokenLifespan = 58 * 60 def BearerTokenGrab():", "58 * 60 def BearerTokenGrab(): global LastToken global LastTokenTime now = time.time() if", "\"Authorization\": \"Bearer \" + BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers) jsonResponse =", "now return jsonResponse['access_token'] #Creates a User ID for the current user def CreateUserID():", "} } response = requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse,", "LastTokenTime now = time.time() if LastToken and LastTokenTime > now - TokenLifespan: return", "\"Bearer \" + BearerTokenGrab() } payloaddict = { \"data\": { \"type\": \"interaction\", \"attributes\":", "json from decouple import config import time #Creating and getting my bearer token", "} response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token'] LastTokenTime", "\"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken =", "time.time() if LastToken and LastTokenTime > now - TokenLifespan: return LastToken url =", "the current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers =", "response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token'] LastTokenTime =", "\"interaction\", \"attributes\": { \"user\": { \"id\": UserID }, \"input\": input } } }", "= False LastTokenTime = False TokenLifespan = 58 * 60 def BearerTokenGrab(): global", "} } } response = requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp =", "response = requests.post(url, headers=headers, 
json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True)", "= { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers) jsonResponse", "\"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken", "my bearer token LastToken = False LastTokenTime = False TokenLifespan = 58 *", "{ \"user\": { \"id\": UserID }, \"input\": input } } } response =", "+ BearerTokenGrab() } payloaddict = { \"data\": { \"type\": \"interaction\", \"attributes\": { \"user\":", "#Creates a User ID for the current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\"", "= \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = {", "jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\", \"Authorization\":", "getting my bearer token LastToken = False LastTokenTime = False TokenLifespan = 58", "{ \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers) jsonResponse =", "jsonResponse = response.json() LastToken = jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token'] #Creates a", "= requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url", "\"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload,", 
"data=payload, headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token']", "return jsonResponse['access_token'] #Creates a User ID for the current user def CreateUserID(): url", "= response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = {", "requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True) return formatJsonresp", "user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = { \"Content-Type\":", "payloaddict = { \"data\": { \"type\": \"interaction\", \"attributes\": { \"user\": { \"id\": UserID", "60 def BearerTokenGrab(): global LastToken global LastTokenTime now = time.time() if LastToken and", "LastTokenTime > now - TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\"", "payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers =", "+ config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\",", "headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers)", "{ \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } response = requests.post(url, data=payload,", "\"input\": input } } } response = requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json()", "TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + 
config(\"TBP_CLIENT_ID\") + \"&client_secret=\"", "response.json() LastToken = jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token'] #Creates a User ID", "LastTokenTime = False TokenLifespan = 58 * 60 def BearerTokenGrab(): global LastToken global", "\"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers) jsonResponse", "\"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token']", "config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response =", "LastTokenTime = now return jsonResponse['access_token'] #Creates a User ID for the current user", "BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def", "import time #Creating and getting my bearer token LastToken = False LastTokenTime =", "def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\",", "requests import json from decouple import config import time #Creating and getting my", "getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \"", "\" + BearerTokenGrab() } payloaddict = { \"data\": { \"type\": \"interaction\", \"attributes\": {", "{ \"data\": { \"type\": \"interaction\", \"attributes\": { \"user\": { \"id\": UserID }, \"input\":", "- TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") +", "jsonResponse['access_token'] LastTokenTime = 
now return jsonResponse['access_token'] #Creates a User ID for the current", "<filename>thebotplatform.py import requests import json from decouple import config import time #Creating and", "LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\")", "url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab()", "and getting my bearer token LastToken = False LastTokenTime = False TokenLifespan =", "} response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID,", "from decouple import config import time #Creating and getting my bearer token LastToken", "False LastTokenTime = False TokenLifespan = 58 * 60 def BearerTokenGrab(): global LastToken", "= { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } response = requests.post(url,", "+ \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\"", "headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers", "jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True) return formatJsonresp print(\"Running the proxy", "response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\":", "global LastTokenTime now = time.time() if LastToken and LastTokenTime > now - TokenLifespan:", "False TokenLifespan = 
58 * 60 def BearerTokenGrab(): global LastToken global LastTokenTime now", "#Creating and getting my bearer token LastToken = False LastTokenTime = False TokenLifespan", "a User ID for the current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload", "\" + BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return", "jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers =", "now - TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\")", "return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" +", "\"data\": { \"type\": \"interaction\", \"attributes\": { \"user\": { \"id\": UserID }, \"input\": input", "if LastToken and LastTokenTime > now - TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\"", "} response = requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4,", "\"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict = { \"data\": {", "\"attributes\": { \"user\": { \"id\": UserID }, \"input\": input } } } response", "User ID for the current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload =", "headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } response =", "response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input):", "\"Accept\": \"application/json\", 
\"Content-Type\": \"application/x-www-form-urlencoded\" } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json()", "\"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" +", "[] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } response", "= requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True) return", "BearerTokenGrab(): global LastToken global LastTokenTime now = time.time() if LastToken and LastTokenTime >", "\"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" }", "BearerTokenGrab() } payloaddict = { \"data\": { \"type\": \"interaction\", \"attributes\": { \"user\": {", "} payloaddict = { \"data\": { \"type\": \"interaction\", \"attributes\": { \"user\": { \"id\":", "import json from decouple import config import time #Creating and getting my bearer", "input } } } response = requests.post(url, headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp", "CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\":", "def getBotResponse(UserID, input): url = \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer", "}, \"input\": input } } } response = requests.post(url, headers=headers, json=payloaddict) jsonResponse =", "now = time.time() if LastToken and LastTokenTime > now - TokenLifespan: return LastToken", "time #Creating and getting my bearer token LastToken = False LastTokenTime = 
False", "{ \"type\": \"interaction\", \"attributes\": { \"user\": { \"id\": UserID }, \"input\": input }", "\"Bearer \" + BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json()", "url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer", "global LastToken global LastTokenTime now = time.time() if LastToken and LastTokenTime > now", "and LastTokenTime > now - TokenLifespan: return LastToken url = \"https://api.thebotplatform.com/oauth2/token\" payload =", "import config import time #Creating and getting my bearer token LastToken = False", "token LastToken = False LastTokenTime = False TokenLifespan = 58 * 60 def", "= time.time() if LastToken and LastTokenTime > now - TokenLifespan: return LastToken url", "= now return jsonResponse['access_token'] #Creates a User ID for the current user def", "= [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab() }", "+ BearerTokenGrab() } response = requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id']", "requests.post(url, data=payload, headers=headers) jsonResponse = response.json() return jsonResponse['data']['attributes']['user']['id'] def getBotResponse(UserID, input): url =", "= \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\"", "= { \"data\": { \"type\": \"interaction\", \"attributes\": { \"user\": { \"id\": UserID },", "= response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True) return formatJsonresp print(\"Running the proxy server\")", "\"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": 
\"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict", "= 58 * 60 def BearerTokenGrab(): global LastToken global LastTokenTime now = time.time()", "\"user\": { \"id\": UserID }, \"input\": input } } } response = requests.post(url,", "headers=headers, json=payloaddict) jsonResponse = response.json() formatJsonresp = json.dumps(jsonResponse, indent=4, sort_keys=True) return formatJsonresp print(\"Running", "url = \"https://api.thebotplatform.com/oauth2/token\" payload = \"client_id=\" + config(\"TBP_CLIENT_ID\") + \"&client_secret=\" + config(\"TBP_CLIENT_SECRET\") +", "jsonResponse['access_token'] #Creates a User ID for the current user def CreateUserID(): url =", "ID for the current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = []", "payload = [] headers = { \"Content-Type\": \"application/x-www-form-urlencoded\", \"Authorization\": \"Bearer \" + BearerTokenGrab()", "for the current user def CreateUserID(): url = \"https://api.thebotplatform.com/v1.0/interaction/user\" payload = [] headers", "bearer token LastToken = False LastTokenTime = False TokenLifespan = 58 * 60", "headers = { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict =", "= \"https://api.thebotplatform.com/v1.0/interaction\" headers = { \"Content-Type\": \"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab() }", "LastToken global LastTokenTime now = time.time() if LastToken and LastTokenTime > now -", "= response.json() LastToken = jsonResponse['access_token'] LastTokenTime = now return jsonResponse['access_token'] #Creates a User", "+ config(\"TBP_CLIENT_SECRET\") + \"&grant_type=client_credentials\" headers = { \"Accept\": \"application/json\", \"Content-Type\": \"application/x-www-form-urlencoded\" } response", "= requests.post(url, data=payload, headers=headers) jsonResponse = response.json() LastToken = jsonResponse['access_token'] 
LastTokenTime = now", "\"application/json\", \"Authorization\": \"Bearer \" + BearerTokenGrab() } payloaddict = { \"data\": { \"type\":", "decouple import config import time #Creating and getting my bearer token LastToken =" ]
[ "input(\"Enter first number: \") b = input(\"Enter first number: \") sum_numbers = sum(int(a),int(b))", "number: \") b = input(\"Enter first number: \") sum_numbers = sum(int(a),int(b)) print(\"Sum: \",", "return a+b a = input(\"Enter first number: \") b = input(\"Enter first number:", "sum(a,b): return a+b a = input(\"Enter first number: \") b = input(\"Enter first", "= input(\"Enter first number: \") b = input(\"Enter first number: \") sum_numbers =", "\") b = input(\"Enter first number: \") sum_numbers = sum(int(a),int(b)) print(\"Sum: \", sum_numbers)", "first number: \") b = input(\"Enter first number: \") sum_numbers = sum(int(a),int(b)) print(\"Sum:", "def sum(a,b): return a+b a = input(\"Enter first number: \") b = input(\"Enter", "<gh_stars>1-10 def sum(a,b): return a+b a = input(\"Enter first number: \") b =", "a = input(\"Enter first number: \") b = input(\"Enter first number: \") sum_numbers", "a+b a = input(\"Enter first number: \") b = input(\"Enter first number: \")" ]
[ "== ( height, width), (image.shape[:2], (height, width)) print('verify ' + ifile) self._batch_generator =", "[] @property def n_samples(self): return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator) return self._batch", "+ ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch = [] while True: if", "-*- coding: utf-8 -*- \"\"\"Batch generator definition.\"\"\" import cv2 import numpy as np", "= cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size self._height = height self._width =", "images.\"\"\" def __init__(self, input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert batch_size >", "= batch_size self._height = height self._width = width self._shuffle = shuffle self._flip_h =", "batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch) batch = [] @property def n_samples(self):", "input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] == ( height,", "= [] @property def n_samples(self): return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator) return", "isinstance(image, np.ndarray) assert image.shape[:2] == ( height, width), (image.shape[:2], (height, width)) print('verify '", "file_index = np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx in file_index: image =", "' + ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch = [] while True:", "* 2. 
batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch) batch = [] @property", "image = image.transpose((2, 0, 1)) image = image.astype(np.float32) image = ((image / 255.)", "for ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2]", "self._flip_h: if np.random.randint(2) == 0: image = image[:, ::-1] if image.ndim == 2:", "0, 1)) image = image.astype(np.float32) image = ((image / 255.) - 0.5) *", "else: image = image.transpose((2, 0, 1)) image = image.astype(np.float32) image = ((image /", "channel == 1, channel if channel == 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag", "def __init__(self, input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert batch_size > 0,", "if len(batch) == self._batch_size: yield np.asarray(batch) batch = [] @property def n_samples(self): return", "np.random.randint(2) == 0: image = image[:, ::-1] if image.ndim == 2: image =", "self._input_files = input_files self._batch_size = batch_size self._height = height self._width = width self._shuffle", "height self._width = width self._shuffle = shuffle self._flip_h = flip_h for ifile in", "image = image.reshape((1,) + image.shape) else: image = image.transpose((2, 0, 1)) image =", "assert image.shape[:2] == ( height, width), (image.shape[:2], (height, width)) print('verify ' + ifile)", "1, channel if channel == 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE", "shuffle self._flip_h = flip_h for ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert", "assert channel == 3 or channel == 1, channel if channel == 3:", "channel if channel == 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files", "if np.random.randint(2) == 0: image = image[:, ::-1] if image.ndim == 2: image", "if self._shuffle: file_index = 
np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx in file_index:", "np class ImageBatchGenerator(object): \"\"\"Batch generator for training on general images.\"\"\" def __init__(self, input_files,", "flip_h for ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert", "self._width = width self._shuffle = shuffle self._flip_h = flip_h for ifile in input_files:", "numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator for training on general images.\"\"\" def", "print('verify ' + ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch = [] while", "len(batch) == self._batch_size: yield np.asarray(batch) batch = [] @property def n_samples(self): return len(self._input_files)", "cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] == ( height, width), (image.shape[:2], (height,", "if channel == 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files =", "( height, width), (image.shape[:2], (height, width)) print('verify ' + ifile) self._batch_generator = self.__get_batch_generator()", "idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) == 0:", "image[:, ::-1] if image.ndim == 2: image = image.reshape((1,) + image.shape) else: image", "range(self.n_samples) for idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2)", "== 1, channel if channel == 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag =", "self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size self._height = height self._width", "cv2 import numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator for training on general", "= cv2.IMREAD_COLOR else: 
self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size self._height", "cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] == ( height, width), (image.shape[:2], (height, width))", "- 0.5) * 2. batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch) batch =", "batch_size assert channel == 3 or channel == 1, channel if channel ==", "batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert batch_size > 0, batch_size assert channel", "else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size self._height = height", "> 0, batch_size assert channel == 3 or channel == 1, channel if", "255.) - 0.5) * 2. batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch) batch", "generator definition.\"\"\" import cv2 import numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator for", "True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx in", "(height, width)) print('verify ' + ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch =", "yield np.asarray(batch) batch = [] @property def n_samples(self): return len(self._input_files) def __next__(self): self._batch", "flip_h=False): assert batch_size > 0, batch_size assert channel == 3 or channel ==", "[] while True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for", "assert batch_size > 0, batch_size assert channel == 3 or channel == 1,", "= shuffle self._flip_h = flip_h for ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED)", "\"\"\"Batch generator definition.\"\"\" import cv2 import numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator", "== 2: image = image.reshape((1,) + image.shape) else: image = image.transpose((2, 0, 
1))", "if image.ndim == 2: image = image.reshape((1,) + image.shape) else: image = image.transpose((2,", "width), (image.shape[:2], (height, width)) print('verify ' + ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self):", "batch_size > 0, batch_size assert channel == 3 or channel == 1, channel", "in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] == (", "cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size self._height = height self._width = width", "self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx in file_index: image", "def __get_batch_generator(self): batch = [] while True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else:", "image = image.astype(np.float32) image = ((image / 255.) - 0.5) * 2. batch.append(image)", "= flip_h for ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray)", "for training on general images.\"\"\" def __init__(self, input_files, batch_size, height, width, channel=3, shuffle=False,", "__get_batch_generator(self): batch = [] while True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index", "batch = [] @property def n_samples(self): return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator)", "return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator) return self._batch def next(self): return self.__next__()", "as np class ImageBatchGenerator(object): \"\"\"Batch generator for training on general images.\"\"\" def __init__(self,", "channel == 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files", "cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) == 0: 
image = image[:, ::-1] if", "input_files self._batch_size = batch_size self._height = height self._width = width self._shuffle = shuffle", "ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] ==", "generator for training on general images.\"\"\" def __init__(self, input_files, batch_size, height, width, channel=3,", "assert isinstance(image, np.ndarray) assert image.shape[:2] == ( height, width), (image.shape[:2], (height, width)) print('verify", "channel=3, shuffle=False, flip_h=False): assert batch_size > 0, batch_size assert channel == 3 or", "0.5) * 2. batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch) batch = []", "::-1] if image.ndim == 2: image = image.reshape((1,) + image.shape) else: image =", "width self._shuffle = shuffle self._flip_h = flip_h for ifile in input_files: image =", "on general images.\"\"\" def __init__(self, input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert", "self._flip_h = flip_h for ifile in input_files: image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image,", "batch = [] while True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index =", "self._height = height self._width = width self._shuffle = shuffle self._flip_h = flip_h for", "n_samples(self): return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator) return self._batch def next(self): return", "width)) print('verify ' + ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch = []", "image = ((image / 255.) - 0.5) * 2. 
batch.append(image) if len(batch) ==", "__init__(self, input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert batch_size > 0, batch_size", "= self.__get_batch_generator() def __get_batch_generator(self): batch = [] while True: if self._shuffle: file_index =", "= np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx in file_index: image = cv2.imread(self._input_files[idx],", "shuffle=False, flip_h=False): assert batch_size > 0, batch_size assert channel == 3 or channel", "image.ndim == 2: image = image.reshape((1,) + image.shape) else: image = image.transpose((2, 0,", "= range(self.n_samples) for idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if", "@property def n_samples(self): return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator) return self._batch def", "self.__get_batch_generator() def __get_batch_generator(self): batch = [] while True: if self._shuffle: file_index = np.random.permutation(self.n_samples)", "image.astype(np.float32) image = ((image / 255.) - 0.5) * 2. batch.append(image) if len(batch)", "((image / 255.) - 0.5) * 2. batch.append(image) if len(batch) == self._batch_size: yield", "image.transpose((2, 0, 1)) image = image.astype(np.float32) image = ((image / 255.) 
- 0.5)", "ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch = [] while True: if self._shuffle:", "height, width), (image.shape[:2], (height, width)) print('verify ' + ifile) self._batch_generator = self.__get_batch_generator() def", "self._imread_flag) if self._flip_h: if np.random.randint(2) == 0: image = image[:, ::-1] if image.ndim", "= cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) == 0: image = image[:, ::-1]", "0: image = image[:, ::-1] if image.ndim == 2: image = image.reshape((1,) +", "training on general images.\"\"\" def __init__(self, input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False):", "self._batch_size: yield np.asarray(batch) batch = [] @property def n_samples(self): return len(self._input_files) def __next__(self):", "# -*- coding: utf-8 -*- \"\"\"Batch generator definition.\"\"\" import cv2 import numpy as", "== self._batch_size: yield np.asarray(batch) batch = [] @property def n_samples(self): return len(self._input_files) def", "np.ndarray) assert image.shape[:2] == ( height, width), (image.shape[:2], (height, width)) print('verify ' +", "image.shape[:2] == ( height, width), (image.shape[:2], (height, width)) print('verify ' + ifile) self._batch_generator", "\"\"\"Batch generator for training on general images.\"\"\" def __init__(self, input_files, batch_size, height, width,", "= [] while True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index = range(self.n_samples)", "= ((image / 255.) - 0.5) * 2. 
batch.append(image) if len(batch) == self._batch_size:", "class ImageBatchGenerator(object): \"\"\"Batch generator for training on general images.\"\"\" def __init__(self, input_files, batch_size,", "input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert batch_size > 0, batch_size assert", "else: file_index = range(self.n_samples) for idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if", "image = image[:, ::-1] if image.ndim == 2: image = image.reshape((1,) + image.shape)", "file_index = range(self.n_samples) for idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h:", "or channel == 1, channel if channel == 3: self._imread_flag = cv2.IMREAD_COLOR else:", "general images.\"\"\" def __init__(self, input_files, batch_size, height, width, channel=3, shuffle=False, flip_h=False): assert batch_size", "3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size =", "image = cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] == ( height, width),", "0, batch_size assert channel == 3 or channel == 1, channel if channel", "= cv2.imread(ifile, cv2.IMREAD_UNCHANGED) assert isinstance(image, np.ndarray) assert image.shape[:2] == ( height, width), (image.shape[:2],", "= image.reshape((1,) + image.shape) else: image = image.transpose((2, 0, 1)) image = image.astype(np.float32)", "= width self._shuffle = shuffle self._flip_h = flip_h for ifile in input_files: image", "height, width, channel=3, shuffle=False, flip_h=False): assert batch_size > 0, batch_size assert channel ==", "= image.transpose((2, 0, 1)) image = image.astype(np.float32) image = ((image / 255.) -", "= image.astype(np.float32) image = ((image / 255.) - 0.5) * 2. 
batch.append(image) if", "== 3: self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size", "definition.\"\"\" import cv2 import numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator for training", "== 3 or channel == 1, channel if channel == 3: self._imread_flag =", "width, channel=3, shuffle=False, flip_h=False): assert batch_size > 0, batch_size assert channel == 3", "/ 255.) - 0.5) * 2. batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch)", "== 0: image = image[:, ::-1] if image.ndim == 2: image = image.reshape((1,)", "coding: utf-8 -*- \"\"\"Batch generator definition.\"\"\" import cv2 import numpy as np class", "2. batch.append(image) if len(batch) == self._batch_size: yield np.asarray(batch) batch = [] @property def", "self._imread_flag = cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size", "if self._flip_h: if np.random.randint(2) == 0: image = image[:, ::-1] if image.ndim ==", "= image[:, ::-1] if image.ndim == 2: image = image.reshape((1,) + image.shape) else:", "channel == 3 or channel == 1, channel if channel == 3: self._imread_flag", "def n_samples(self): return len(self._input_files) def __next__(self): self._batch = next(self._batch_generator) return self._batch def next(self):", "= input_files self._batch_size = batch_size self._height = height self._width = width self._shuffle =", "2: image = image.reshape((1,) + image.shape) else: image = image.transpose((2, 0, 1)) image", "self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch = [] while True: if self._shuffle: file_index", "+ image.shape) else: image = image.transpose((2, 0, 1)) image = image.astype(np.float32) image =", "1)) image = image.astype(np.float32) image = ((image / 255.) 
- 0.5) * 2.", "utf-8 -*- \"\"\"Batch generator definition.\"\"\" import cv2 import numpy as np class ImageBatchGenerator(object):", "self._shuffle = shuffle self._flip_h = flip_h for ifile in input_files: image = cv2.imread(ifile,", "ImageBatchGenerator(object): \"\"\"Batch generator for training on general images.\"\"\" def __init__(self, input_files, batch_size, height,", "cv2.IMREAD_COLOR else: self._imread_flag = cv2.IMREAD_GRAYSCALE self._input_files = input_files self._batch_size = batch_size self._height =", "batch_size self._height = height self._width = width self._shuffle = shuffle self._flip_h = flip_h", "image.shape) else: image = image.transpose((2, 0, 1)) image = image.astype(np.float32) image = ((image", "self._batch_size = batch_size self._height = height self._width = width self._shuffle = shuffle self._flip_h", "image.reshape((1,) + image.shape) else: image = image.transpose((2, 0, 1)) image = image.astype(np.float32) image", "<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\"Batch generator definition.\"\"\" import cv2 import numpy", "(image.shape[:2], (height, width)) print('verify ' + ifile) self._batch_generator = self.__get_batch_generator() def __get_batch_generator(self): batch", "for idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) ==", "np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag)", "import cv2 import numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator for training on", "import numpy as np class ImageBatchGenerator(object): \"\"\"Batch generator for training on general images.\"\"\"", "in file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) == 0: image", "file_index: image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) == 0: 
image =", "image = cv2.imread(self._input_files[idx], self._imread_flag) if self._flip_h: if np.random.randint(2) == 0: image = image[:,", "= height self._width = width self._shuffle = shuffle self._flip_h = flip_h for ifile", "while True: if self._shuffle: file_index = np.random.permutation(self.n_samples) else: file_index = range(self.n_samples) for idx", "np.asarray(batch) batch = [] @property def n_samples(self): return len(self._input_files) def __next__(self): self._batch =", "3 or channel == 1, channel if channel == 3: self._imread_flag = cv2.IMREAD_COLOR", "-*- \"\"\"Batch generator definition.\"\"\" import cv2 import numpy as np class ImageBatchGenerator(object): \"\"\"Batch" ]
[ "os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser =", "help=\"create a backup of the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\",", "os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value): if options.backup: shutil.copy2(path, path", "args) = parser.parse_args() if options.revert: if len(args) != 1: parser.print_help() sys.exit(1) elif len(args)", "\"--revert\", action=\"store_true\", default=False, help=\"restore the path to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\",", "parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of the uncorrupted original\" \"", "f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\",", "with open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog", "to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup", "parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create a backup of the uncorrupted original.\")", "of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of", "len(args) != 3: parser.print_help() sys.exit(1) path = args[0] revert(path) if not options.revert: offset", "buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value')", "original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", 
dest=\"backup\", help=\"do not create a backup", "(options, args) = parser.parse_args() if options.revert: if len(args) != 1: parser.print_help() sys.exit(1) elif", "= parser.parse_args() if options.revert: if len(args) != 1: parser.print_help() sys.exit(1) elif len(args) !=", "original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of the file\" \"", "the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of the", "options.revert: if len(args) != 1: parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1)", "file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path to pristine condition", "\" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create a backup of", "args[0] revert(path) if not options.revert: offset = int(eval(args[1])) value = int(eval(args[2])) corrupt(path, offset,", "condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of the", "[default: %default]\") (options, args) = parser.parse_args() if options.revert: if len(args) != 1: parser.print_help()", "pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of", "f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\",", "os import shutil import sys def revert(path): bkup = path + os.path.extsep +", "dest=\"backup\", help=\"do not create a backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\",", "offset, 
value): if options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with open(path, 'r+b',", "import shutil import sys def revert(path): bkup = path + os.path.extsep + options.backup_suffix", "sys.exit(1) path = args[0] revert(path) if not options.revert: offset = int(eval(args[1])) value =", "replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path to pristine condition if possible.\")", "default=\"pristine\", help=\"suffix for uncorrupted copy of the file\" \" [default: %default]\") (options, args)", "parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path to pristine condition if possible.\") parser.add_option(\"-b\",", "+ options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value): if", "action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of the uncorrupted original\" \" [default: %default]\")", "shutil import sys def revert(path): bkup = path + os.path.extsep + options.backup_suffix if", "path + os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value)))", "= args[0] revert(path) if not options.revert: offset = int(eval(args[1])) value = int(eval(args[2])) corrupt(path,", "as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\",", "os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value):", "path = args[0] revert(path) if not options.revert: offset = int(eval(args[1])) value = int(eval(args[2]))", "help=\"restore the path to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\",", "copy of the file\" \" [default: 
%default]\") (options, args) = parser.parse_args() if options.revert:", "open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file", "uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create a", "= path + os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def", "backup of the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do", "[default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create a backup of the", "a backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted", "revert(path) if not options.revert: offset = int(eval(args[1])) value = int(eval(args[2])) corrupt(path, offset, value)", "file\" \" [default: %default]\") (options, args) = parser.parse_args() if options.revert: if len(args) !=", "possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of the uncorrupted original\"", "path + os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path,", "help=\"suffix for uncorrupted copy of the file\" \" [default: %default]\") (options, args) =", "for uncorrupted copy of the file\" \" [default: %default]\") (options, args) = parser.parse_args()", "usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path to", "+ os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser", "if 
options.revert: if len(args) != 1: parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help()", "of the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not", "import os import shutil import sys def revert(path): bkup = path + os.path.extsep", "import sys def revert(path): bkup = path + os.path.extsep + options.backup_suffix if os.access(bkup,", "options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value): if options.backup:", "sys def revert(path): bkup = path + os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK):", "elif len(args) != 3: parser.print_help() sys.exit(1) path = args[0] revert(path) if not options.revert:", "f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False,", "uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of the file\"", "the path to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create", "= optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the", "corrupt(path, offset, value): if options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with open(path,", "not create a backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix", "if len(args) != 1: parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1) path", "os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value): if 
options.backup: shutil.copy2(path, path +", "shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0) as f: f.seek(offset)", "sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1) path = args[0] revert(path) if not", "if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of the uncorrupted", "'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage: %prog file byte-offset", "bkup = path + os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup)", "if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value): if options.backup: shutil.copy2(path,", "1: parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1) path = args[0] revert(path)", "#!/usr/bin/env python import optparse import os import shutil import sys def revert(path): bkup", "of the file\" \" [default: %default]\") (options, args) = parser.parse_args() if options.revert: if", "+ options.backup_suffix) with open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser(", "!= 1: parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1) path = args[0]", "create a backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for", "!= 3: parser.print_help() sys.exit(1) path = args[0] revert(path) if not options.revert: offset =", "os.remove(bkup) def corrupt(path, offset, value): if options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix)", "byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path to pristine condition if", "%default]\") 
parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create a backup of the uncorrupted", "python import optparse import os import shutil import sys def revert(path): bkup =", "shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset, value): if options.backup: shutil.copy2(path, path + os.path.extsep", "\"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of the file\" \" [default: %default]\")", "the file\" \" [default: %default]\") (options, args) = parser.parse_args() if options.revert: if len(args)", "\" [default: %default]\") (options, args) = parser.parse_args() if options.revert: if len(args) != 1:", "parser.parse_args() if options.revert: if len(args) != 1: parser.print_help() sys.exit(1) elif len(args) != 3:", "%prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path to pristine", "options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0) as f:", "path to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a", "default=False, help=\"restore the path to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\", default=True,", "the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create", "uncorrupted copy of the file\" \" [default: %default]\") (options, args) = parser.parse_args() if", "help=\"do not create a backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\",", "parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1) path = args[0] revert(path) if", "parser.print_help() sys.exit(1) path = args[0] 
revert(path) if not options.revert: offset = int(eval(args[1])) value", "\"--backup\", action=\"store_true\", default=True, dest=\"backup\", help=\"create a backup of the uncorrupted original\" \" [default:", "optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore the path", "\"--no-backup\", action=\"store_false\", dest=\"backup\", help=\"do not create a backup of the uncorrupted original.\") parser.add_option(\"\",", "optparse import os import shutil import sys def revert(path): bkup = path +", "backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy", "3: parser.print_help() sys.exit(1) path = args[0] revert(path) if not options.revert: offset = int(eval(args[1]))", "path) os.remove(bkup) def corrupt(path, offset, value): if options.backup: shutil.copy2(path, path + os.path.extsep +", "value): if options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0)", "parser.add_option(\"\", \"--backup-suffix\", type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of the file\" \" [default:", "dest=\"backup\", help=\"create a backup of the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\",", "default=True, dest=\"backup\", help=\"create a backup of the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\",", "import optparse import os import shutil import sys def revert(path): bkup = path", "a backup of the uncorrupted original\" \" [default: %default]\") parser.add_option(\"\", \"--no-backup\", action=\"store_false\", dest=\"backup\",", "+ os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path) os.remove(bkup) def corrupt(path, offset,", "def corrupt(path, offset, value): if 
options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with", "type=\"string\", default=\"pristine\", help=\"suffix for uncorrupted copy of the file\" \" [default: %default]\") (options,", "parser = optparse.OptionParser( usage='usage: %prog file byte-offset replacment-value') parser.add_option(\"\", \"--revert\", action=\"store_true\", default=False, help=\"restore", "%default]\") (options, args) = parser.parse_args() if options.revert: if len(args) != 1: parser.print_help() sys.exit(1)", "action=\"store_false\", dest=\"backup\", help=\"do not create a backup of the uncorrupted original.\") parser.add_option(\"\", \"--backup-suffix\",", "len(args) != 1: parser.print_help() sys.exit(1) elif len(args) != 3: parser.print_help() sys.exit(1) path =", "if options.backup: shutil.copy2(path, path + os.path.extsep + options.backup_suffix) with open(path, 'r+b', buffering=0) as", "action=\"store_true\", default=False, help=\"restore the path to pristine condition if possible.\") parser.add_option(\"-b\", \"--backup\", action=\"store_true\",", "def revert(path): bkup = path + os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup,", "options.backup_suffix) with open(path, 'r+b', buffering=0) as f: f.seek(offset) f.write(bytes(chr(value))) parser = optparse.OptionParser( usage='usage:", "revert(path): bkup = path + os.path.extsep + options.backup_suffix if os.access(bkup, os.R_OK): shutil.copy2(bkup, path)" ]
[ "# # Permission is hereby granted, free of charge, to any person #", "parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ =", "+ txt + \":\" return d def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type':", "d def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m):", "m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError,", "# Permission is hereby granted, free of charge, to any person # obtaining", "= pat[1](m_) break if result is None: raise RuntimeError, \"Cannot parse activity detail\"", "desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date =", ":\" + txt + \":\" return d def parse_activity(m, statements): desc = m.group(2).strip()", "restriction, including without limitation the rights to use, # copy, modify, merge, publish,", "m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))}) else: raise", "= [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\",", "[(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats)", "pat[1]), start_pats) statements = [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip()", "\":\" return d def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc})", "OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR 
OTHERWISE, ARISING", "int(v) d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2:", "pats: m_ = pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break if result is", "desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return", "deal in the Software without # restriction, including without limitation the rights to", "statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN", "# copies of the Software, and to permit persons to whom the #", "(u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats)", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #", "m2 = pat2.match(txt) if m2: d = m2.groupdict() d_ = {} for k,", ", (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width),", "d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d = m2.groupdict()", "None else: raise RuntimeError, \"cannot parse date\" + \" :\" + txt +", "[(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution),", "conditions: # # The above copyright notice and this permission notice shall be", "\" :\" + txt + \":\" return d def parse_activity(m, statements): desc =", "start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements = [] for line in", "# OF 
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO", "'day': 0, 'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\" def", "if result is None: raise RuntimeError, \"Cannot parse activity detail\" if len(statements) ==", "else: raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip())", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "\"There is no activity before activity detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements):", "pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result = None for pat in", "'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution',", "this permission notice shall be # included in all copies or substantial portions", "if m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError,", "line != '': for pat in start_pats: m = pat[0].match(line) if m: pat[1](m,", "THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "of charge, to any person # obtaining a copy of this software and", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY", "int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse first column width\" def parse(txt): start_pats =", "= None for pat in pats: m_ = pat[0].match(m.group(2).strip()) if m_: result =", "RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_:", "int(v) d = d_ d['day'] = None else: raise RuntimeError, \"cannot parse date\"", "'1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse first column 
width\" def parse(txt):", "d = d_ d['day'] = None else: raise RuntimeError, \"cannot parse date\" +", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS", "(u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats", "'activity', 'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m):", "parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else:", "def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date':", "m_: result = pat[1](m_) break if result is None: raise RuntimeError, \"Cannot parse", "parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0, 'year':", "without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense,", "d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d", "if line != '': for pat in start_pats: m = pat[0].match(line) if m:", "THE SOFTWARE. import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if", "the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "!= '': for pat in start_pats: m = pat[0].match(line) if m: pat[1](m, statements)", "statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise", "m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse first", "KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES #", "statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0,", "copies of the Software, and to permit persons to whom the # Software", "return ('complete', percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\",", "to the following # conditions: # # The above copyright notice and this", "Permission is hereby granted, free of charge, to any person # obtaining a", "def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0,", "def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))})", "m2.groupdict() d_ = {} for k, v in d.items(): d_[k] = int(v) d", "for k, v in d.items(): d_[k] = int(v) d = d_ d['day'] =", "sublicense, and/or sell # copies of the Software, and to permit persons to", "parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result = None", "u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat:", 
"raise RuntimeError, \"Cannot parse activity detail\" if len(statements) == 0 or statements[-1]['type'] !=", "re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d = m.groupdict() d_ = {} for", "parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d = m.groupdict() d_", "'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date", "parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements = []", "= parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent)", "= None else: raise RuntimeError, \"cannot parse date\" + \" :\" + txt", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR", "SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d =", "= map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result = None for pat in pats:", "None: raise RuntimeError, \"Cannot parse activity detail\" if len(statements) == 0 or statements[-1]['type']", "software and associated documentation # files (the \"Software\"), to deal in the Software", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT", "else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d = m2.groupdict() d_", "no activity before activity detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type': 'start',", "documentation # files (the \"Software\"), to deal in the Software without # restriction,", "in d.items(): d_[k] = int(v) d = d_ d['day'] = None else: raise", "parse width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width',", "in all copies or substantial portions of the Software. # # THE SOFTWARE", "m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month':", "including without limitation the rights to use, # copy, modify, merge, publish, distribute,", "parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day':", "pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]),", "m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\"", "persons to whom the # Software is furnished to do so, subject to", "def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip())", "shall be # included in all copies or substantial portions of the Software.", "in d.items(): d_[k] = int(v) d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2", 
"re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))}) else:", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT", "ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "is None: raise RuntimeError, \"Cannot parse activity detail\" if len(statements) == 0 or", "\"Cannot parse width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type':", "any person # obtaining a copy of this software and associated documentation #", "CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE", "RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_:", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY,", "CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH", "('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent", "re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse", "'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_", "= [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line !=", "# Copyright 2009 <NAME> # # Permission is hereby granted, free of charge,", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "notice shall be # included in all copies or substantial portions of the", "is no activity before activity detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type':", "d_[k] = int(v) d = d_ 
d['day'] = None else: raise RuntimeError, \"cannot", "AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "so, subject to the following # conditions: # # The above copyright notice", "= m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start',", "of this software and associated documentation # files (the \"Software\"), to deal in", "pats) result = None for pat in pats: m_ = pat[0].match(m.group(2).strip()) if m_:", "permission notice shall be # included in all copies or substantial portions of", "and this permission notice shall be # included in all copies or substantial", "0 or statements[-1]['type'] != 'activity': raise RuntimeError, \"There is no activity before activity", "= line.strip() if line != '': for pat in start_pats: m = pat[0].match(line)", "parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements = [] for line", "if m: d = m.groupdict() d_ = {} for k, v in d.items():", "date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent =", "RuntimeError, \"There is no activity before activity detail\" statements[-1][result[0]] = result[1] def parse_start(m,", "# # Copyright 2009 <NAME> # # Permission is hereby granted, free of", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL", "-*- # # Copyright 2009 <NAME> # # Permission is hereby granted, free", "+ \":\" return d def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc':", "OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION", "parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def", "'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements): m_", "return ('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m,", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT", "associated documentation # files (the \"Software\"), to deal in the Software without #", "Software without # restriction, including without limitation the rights to use, # copy,", "def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip())", "statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "obtaining a copy of this software and associated documentation # files (the \"Software\"),", "'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements): m_ =", "!= 'activity': raise RuntimeError, \"There is no activity before activity detail\" statements[-1][result[0]] =", "parse date\" + \" :\" + txt + \":\" return d def parse_activity(m,", "RuntimeError, \"cannot parse date\" + \" :\" + txt + \":\" return d", "parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", 
parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION", "SOFTWARE. import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m:", "m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError,", "'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements): m_ =", "return d def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def", "A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date =", "if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse first column", "# OTHER DEALINGS IN THE SOFTWARE. 
import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$')", "raise RuntimeError, \"cannot parse date\" + \" :\" + txt + \":\" return", "def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements): pats =", "d = m2.groupdict() d_ = {} for k, v in d.items(): d_[k] =", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR", "(u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements = [] for", "= re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))})", "resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value':", "furnished to do so, subject to the following # conditions: # # The", "parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def", "# restriction, including without limitation the rights to use, # copy, modify, merge,", "parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end", "OR # OTHER DEALINGS IN THE SOFTWARE. import re def parse_date(txt): pat =", "OTHER DEALINGS IN THE SOFTWARE. 
import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m", "first column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\",", "), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]),", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN", "parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda", "the Software, and to permit persons to whom the # Software is furnished", "FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "d_[k] = int(v) d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt)", "is furnished to do so, subject to the following # conditions: # #", "pat[1]), pats) result = None for pat in pats: m_ = pat[0].match(m.group(2).strip()) if", "2009 <NAME> # # Permission is hereby granted, free of charge, to any", "to whom the # Software is furnished to do so, subject to the", "OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "in pats: m_ = pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break if result", "LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #", "statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_:", "this software and associated documentation # files (the \"Software\"), to deal in the", "\"Cannot parse activity detail\" if len(statements) == 0 or statements[-1]['type'] != 'activity': raise", "subject to the following # conditions: # # The above copyright 
notice and", "= result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type':", "copy of this software and associated documentation # files (the \"Software\"), to deal", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF", "OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip())", "Software is furnished to do so, subject to the following # conditions: #", "(u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result =", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF", "SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
import re", "to do so, subject to the following # conditions: # # The above", "for pat in pats: m_ = pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break", "UTF-8 -*- # # Copyright 2009 <NAME> # # Permission is hereby granted,", "d = m.groupdict() d_ = {} for k, v in d.items(): d_[k] =", "TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE", "result = None for pat in pats: m_ = pat[0].match(m.group(2).strip()) if m_: result", "def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\",", "if m_: result = pat[1](m_) break if result is None: raise RuntimeError, \"Cannot", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "m2: d = m2.groupdict() d_ = {} for k, v in d.items(): d_[k]", "parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result = None for pat", "pat: (re.compile(pat[0]), pat[1]), pats) result = None for pat in pats: m_ =", "txt + \":\" return d def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity',", "'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements):", "activity detail\" if len(statements) == 0 or statements[-1]['type'] != 'activity': raise RuntimeError, \"There", "statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements):", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\"", "OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
import re def", "files (the \"Software\"), to deal in the Software without # restriction, including without", "m_ = pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break if result is None:", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or", "else: raise RuntimeError, \"Cannot parse first column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\",", "= map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements = [] for line in txt.replace(\"\\r\",", "NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "\"Cannot parse resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type':", "= pat2.match(txt) if m2: d = m2.groupdict() d_ = {} for k, v", "\"cannot parse date\" + \" :\" + txt + \":\" return d def", "COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, #", "= parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date)", "= int(v) d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if", "start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ),", "statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat:", "activity detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def", "= d_ d['day'] = None else: raise RuntimeError, \"cannot parse date\" + \"", "= m2.groupdict() d_ = {} for k, v in d.items(): d_[k] = int(v)", 
"int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end),", "or statements[-1]['type'] != 'activity': raise RuntimeError, \"There is no activity before activity detail\"", "and associated documentation # files (the \"Software\"), to deal in the Software without", "IN THE SOFTWARE. import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt)", "hereby granted, free of charge, to any person # obtaining a copy of", "<NAME> # # Permission is hereby granted, free of charge, to any person", "TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #", "k, v in d.items(): d_[k] = int(v) d = d_ d['day'] = None", "+ \" :\" + txt + \":\" return d def parse_activity(m, statements): desc", "= re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT,", "<reponame>veer66/vtimeline #-*- coding: UTF-8 -*- # # Copyright 2009 <NAME> # # Permission", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, #", "def parse_activity(m, statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date", "WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "raise RuntimeError, \"There is no activity before activity detail\" statements[-1][result[0]] = result[1] def", "if m2: d = m2.groupdict() d_ = {} for k, v in d.items():", "column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start),", "def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d = 
m.groupdict()", "raise RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if", "percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start),", "statements): desc = m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip())", "OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "line = line.strip() if line != '': for pat in start_pats: m =", "in the Software without # restriction, including without limitation the rights to use,", "re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d = m2.groupdict() d_ = {} for", "= int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\",", "and/or sell # copies of the Software, and to permit persons to whom", "charge, to any person # obtaining a copy of this software and associated", "(re.compile(pat[0]), pat[1]), start_pats) statements = [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line =", "\"\").split(\"\\n\"): line = line.strip() if line != '': for pat in start_pats: m", "None for pat in pats: m_ = pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_)", "person # obtaining a copy of this software and associated documentation # files", "if len(statements) == 0 or statements[-1]['type'] != 'activity': raise RuntimeError, \"There is no", "statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse first column width\" def", "parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return", "granted, free of 
charge, to any person # obtaining a copy of this", "statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse", "EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM,", "date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end',", "'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements):", "line.strip() if line != '': for pat in start_pats: m = pat[0].match(line) if", "k, v in d.items(): d_[k] = int(v) d = d_ else: pat2 =", "line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line != '': for pat", "(u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\",", "if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\" def", "{} for k, v in d.items(): d_[k] = int(v) d = d_ else:", "pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d = m.groupdict() d_ =", "def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats =", "permit persons to whom the # Software is furnished to do so, subject", "raise RuntimeError, \"Cannot parse first column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity)", "re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse", "(the \"Software\"), to deal in the Software without # restriction, including without limitation", "else: raise 
RuntimeError, \"cannot parse date\" + \" :\" + txt + \":\"", "width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), (", "DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR", "def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))})", "DEALINGS IN THE SOFTWARE. import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m =", "= re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d = m.groupdict() d_ = {}", "percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats", "len(statements) == 0 or statements[-1]['type'] != 'activity': raise RuntimeError, \"There is no activity", "notice and this permission notice shall be # included in all copies or", "# Software is furnished to do so, subject to the following # conditions:", "# # The above copyright notice and this permission notice shall be #", "parse resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width',", "whom the # Software is furnished to do so, subject to the following", "{} for k, v in d.items(): d_[k] = int(v) d = d_ d['day']", "m.groupdict() d_ = {} for k, v in d.items(): d_[k] = int(v) d", "('complete', percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)]", "(re.compile(pat[0]), pat[1]), pats) result = None for pat in pats: m_ = pat[0].match(m.group(2).strip())", "m_: statements.append({'type': '1st_col_width', 'value': int(m_.group(1))}) 
else: raise RuntimeError, \"Cannot parse first column width\"", "int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\",", "statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if", "THE USE OR # OTHER DEALINGS IN THE SOFTWARE. import re def parse_date(txt):", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "(u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements =", "for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line != '': for", "statements = [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line", "v in d.items(): d_[k] = int(v) d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$')", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS", "== 0 or statements[-1]['type'] != 'activity': raise RuntimeError, \"There is no activity before", "( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF", "# included in all copies or substantial portions of the Software. 
# #", "result is None: raise RuntimeError, \"Cannot parse activity detail\" if len(statements) == 0", "return ('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m):", "= re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot", "('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements):", "copyright notice and this permission notice shall be # included in all copies", "d.items(): d_[k] = int(v) d = d_ d['day'] = None else: raise RuntimeError,", "# obtaining a copy of this software and associated documentation # files (the", "0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements): m_", "parse first column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail),", "THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT.", "statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements):", "'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\", m.group(2).strip()) if m_: statements.append({'type':", "for pat in start_pats: m = pat[0].match(line) if m: pat[1](m, statements) break return", "\"Software\"), to deal in the Software without # restriction, including without limitation the", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A", "date = parse_date(m.groupdict()['date'].strip()) return ('end', date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete',", "= d_ else: pat2 = 
re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d =", "parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats = map(lambda pat: (re.compile(pat[0]),", "THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. import", "coding: UTF-8 -*- # # Copyright 2009 <NAME> # # Permission is hereby", "0, 'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse resolution\" def parse_width(m,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN", "start_pats) statements = [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "is hereby granted, free of charge, to any person # obtaining a copy", "# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #", "date\" + \" :\" + txt + \":\" return d def parse_activity(m, statements):", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR", "# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM,", "# The above copyright notice and this permission notice shall be # included", "def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m, statements): m_ = re.match(u\"^(\\d+)\\s+เดือน$\",", "included in all copies or substantial portions of the Software. # # THE", "pat.match(txt) if m: d = m.groupdict() d_ = {} for k, v in", "USE OR # OTHER DEALINGS IN THE SOFTWARE. 
import re def parse_date(txt): pat", "= int(v) d = d_ d['day'] = None else: raise RuntimeError, \"cannot parse", "parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)] start_pats =", "activity before activity detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date':", "d_ = {} for k, v in d.items(): d_[k] = int(v) d =", "m = pat.match(txt) if m: d = m.groupdict() d_ = {} for k,", "d.items(): d_[k] = int(v) d = d_ else: pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 =", "detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m,", "rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell #", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS", "RuntimeError, \"Cannot parse activity detail\" if len(statements) == 0 or statements[-1]['type'] != 'activity':", "parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", parse_resolution), (u\"(^ช่องกว้าง)(.+)\", parse_width), (u\"(^ช่องแรกกว้าง)(.+)\", parse_first_column_width)]", "parse activity detail\" if len(statements) == 0 or statements[-1]['type'] != 'activity': raise RuntimeError,", "#-*- coding: UTF-8 -*- # # Copyright 2009 <NAME> # # Permission is", "be # included in all copies or substantial portions of the Software. 
#", "parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())})", "= pat.match(txt) if m: d = m.groupdict() d_ = {} for k, v", "and to permit persons to whom the # Software is furnished to do", "map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result = None for pat in pats: m_", "statements[-1]['type'] != 'activity': raise RuntimeError, \"There is no activity before activity detail\" statements[-1][result[0]]", "to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "m.group(2).strip() statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date)", "else: raise RuntimeError, \"Cannot parse resolution\" def parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip())", "in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line != '': for pat in", "sell # copies of the Software, and to permit persons to whom the", "PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "\"Cannot parse first column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) , (u\"(^\\*)(.+)\",", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "do so, subject to the following # conditions: # # The above copyright", "m_: statements.append({'type': 'resolution', 'day': 0, 'year': 0, 'month': int(m_.group(1))}) else: raise RuntimeError, \"Cannot", "Software, and to permit persons to whom the # Software is furnished to", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT", "parse_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else:", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER", "statements.append({'type': 'activity', 'desc': desc}) def parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def", "d_ d['day'] = None else: raise RuntimeError, \"cannot parse date\" + \" :\"", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE", "# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER", "of the Software, and to permit persons to whom the # Software is", "use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "all copies or substantial portions of the Software. # # THE SOFTWARE IS", "OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "to any person # obtaining a copy of this software and associated documentation", "[] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line != '':", "WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN", "v in d.items(): d_[k] = int(v) d = d_ d['day'] = None else:", "following # conditions: # # The above copyright notice and this permission notice", "# conditions: # # The above copyright notice and this permission notice shall", "Copyright 2009 <NAME> # # Permission is hereby granted, free of charge, to", "the Software without # restriction, including without limitation the rights to use, #", "parse_act_start(m): date = parse_date(m.groupdict()['date'].strip()) return ('start', date) def parse_act_end(m): date = parse_date(m.groupdict()['date'].strip()) return", "= re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d = m2.groupdict() d_ = {}", "pat in start_pats: m = pat[0].match(line) if m: pat[1](m, statements) break return statements", "pat2 = re.compile('^(?P<month>\\d+)/(?P<year>\\d+)$') m2 = pat2.match(txt) if m2: d = m2.groupdict() d_ =", "= {} for k, v in d.items(): d_[k] = int(v) d = d_", "result = pat[1](m_) break if result is None: raise RuntimeError, \"Cannot parse activity", "above copyright notice and this permission notice shall be # included in all", "raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if", "= m.groupdict() d_ = {} for k, v in d.items(): d_[k] = int(v)", "for k, v in d.items(): d_[k] = int(v) d = d_ else: pat2", "WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE.", "'activity': raise RuntimeError, \"There is no activity before activity detail\" statements[-1][result[0]] = result[1]", "txt.replace(\"\\r\", \"\").split(\"\\n\"): line = line.strip() if line != '': for pat in start_pats:", "date) def parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements): pats", "parse_activity) , (u\"(^\\*)(.+)\", parse_activity_detail), (u\"(^เริ่ม)(.+)\", parse_start), ( u\"(^สิ้นสุด)(.+)\", parse_end ), (u\"(^ความละเอียด)(.+)\", 
parse_resolution), (u\"(^ช่องกว้าง)(.+)\",", "The above copyright notice and this permission notice shall be # included in", "= [(u\"^ตั้งแต่(?P<date>.+)\", parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]),", "pat: (re.compile(pat[0]), pat[1]), start_pats) statements = [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"): line", "# files (the \"Software\"), to deal in the Software without # restriction, including", "pat2.match(txt) if m2: d = m2.groupdict() d_ = {} for k, v in", "result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end',", "CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT", "(u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result = None for", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED,", "the # Software is furnished to do so, subject to the following #", "map(lambda pat: (re.compile(pat[0]), pat[1]), start_pats) statements = [] for line in txt.replace(\"\\r\", \"\").split(\"\\n\"):", "break if result is None: raise RuntimeError, \"Cannot parse activity detail\" if len(statements)", "pat[1](m_) break if result is None: raise RuntimeError, \"Cannot parse activity detail\" if", "'': for pat in start_pats: m = pat[0].match(line) if m: pat[1](m, statements) break", "to permit persons to whom the # Software is furnished to do so,", "the following # conditions: # # The above copyright notice and this permission", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR", "pat in pats: m_ = pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break if", "FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE", "AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS", "m_: statements.append({'type': 'width', 'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m,", "BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())}) def parse_end(m, statements): statements.append({'type': 'end', 'date': parse_date(m.group(2).strip())}) def parse_resolution(m,", "d['day'] = None else: raise RuntimeError, \"cannot parse date\" + \" :\" +", "# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE", "RuntimeError, \"Cannot parse first column width\" def parse(txt): start_pats = [(u\"(^กิจกรรม)(.+)\", parse_activity) ,", "pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break if result is None: raise RuntimeError,", "int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\",", "LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "before activity detail\" statements[-1][result[0]] = result[1] def parse_start(m, statements): statements.append({'type': 'start', 'date': parse_date(m.group(2).strip())})", "parse_act_start), (u\"^ถึง(?P<date>.+)\", parse_act_end), (u\"^สำเร็จร้อยละ\\s*(?P<percent>\\d+)\", parse_act_complete)] pats = map(lambda pat: (re.compile(pat[0]), pat[1]), pats) result", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "free of charge, to any person # obtaining a copy of this software", "to deal in the Software without # restriction, including without limitation the rights", "parse_act_complete(m): percent = int(m.groupdict()['percent'].strip()) return ('complete', percent) def parse_activity_detail(m, statements): pats = [(u\"^ตั้งแต่(?P<date>.+)\",", "the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell", "m: d = 
m.groupdict() d_ = {} for k, v in d.items(): d_[k]", "a copy of this software and associated documentation # files (the \"Software\"), to", "without # restriction, including without limitation the rights to use, # copy, modify,", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN", "import re def parse_date(txt): pat = re.compile('^(?P<day>\\d+)/(?P<month>\\d+)/(?P<year>\\d+)$') m = pat.match(txt) if m: d", "# copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "detail\" if len(statements) == 0 or statements[-1]['type'] != 'activity': raise RuntimeError, \"There is", "width\" def parse_first_column_width(m, statements): m_ = re.match(u\"^(\\d+)$\", m.group(2).strip()) if m_: statements.append({'type': '1st_col_width', 'value':", "= pat[0].match(m.group(2).strip()) if m_: result = pat[1](m_) break if result is None: raise", "'value': int(m_.group(1))}) else: raise RuntimeError, \"Cannot parse first column width\" def parse(txt): start_pats" ]
[ "# Copyright 2017 <NAME>. See LICENSE.md file for terms. import socket import threading", "thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s =", "s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection Address: ' + str(addr)) while", "See LICENSE.md file for terms. import socket import threading import api import usersim", "socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection Address: ' + str(addr))", "= conn.recv(20) if not data: break print('received data: ' + str(data)) conn.send(data) conn.close()", "run_test(): telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist':", "s.accept() print('Connection Address: ' + str(addr)) while True: data = conn.recv(20) if not", "usersim TCP_IP = 'localhost' TCP_PORT = 5005 def run_test(): telnet_config = {'type': 'telnet',", "not data: break print('received data: ' + str(data)) conn.send(data) conn.close() if __name__ ==", "for terms. import socket import threading import api import usersim TCP_IP = 'localhost'", "'telnet', 'config': {'host': TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other stuff',", "def run_test(): telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP, 'username': 'admin', 'password': 'password',", "= api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr", "'config': {'host': TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other stuff', 'do", "2017 <NAME>. See LICENSE.md file for terms. 
import socket import threading import api", "data = conn.recv(20) if not data: break print('received data: ' + str(data)) conn.send(data)", "usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1)", "5005 def run_test(): telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP, 'username': 'admin', 'password':", "TCP_PORT = 5005 def run_test(): telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP, 'username':", "'password', 'commandlist': ['printstuff', 'do other stuff', 'do this thing'], 'port': TCP_PORT}} sim =", "TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection Address: ' + str(addr)) while True:", "import usersim TCP_IP = 'localhost' TCP_PORT = 5005 def run_test(): telnet_config = {'type':", "= usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT))", "True: data = conn.recv(20) if not data: break print('received data: ' + str(data))", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection Address: '", "stuff', 'do this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def", "file for terms. import socket import threading import api import usersim TCP_IP =", "'do this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server():", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection Address: ' +", "TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "Copyright 2017 <NAME>. See LICENSE.md file for terms. 
import socket import threading import", "this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s", "str(addr)) while True: data = conn.recv(20) if not data: break print('received data: '", "+ str(addr)) while True: data = conn.recv(20) if not data: break print('received data:", "api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr =", "Address: ' + str(addr)) while True: data = conn.recv(20) if not data: break", "socket import threading import api import usersim TCP_IP = 'localhost' TCP_PORT = 5005", "'commandlist': ['printstuff', 'do other stuff', 'do this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True)", "= 'localhost' TCP_PORT = 5005 def run_test(): telnet_config = {'type': 'telnet', 'config': {'host':", "data: break print('received data: ' + str(data)) conn.send(data) conn.close() if __name__ == '__main__':", "'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other stuff', 'do this thing'], 'port':", "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection Address:", "print('Connection Address: ' + str(addr)) while True: data = conn.recv(20) if not data:", "sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP,", "conn.recv(20) if not data: break print('received data: ' + str(data)) conn.send(data) conn.close() if", "'port': TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET,", "TCP_IP = 'localhost' TCP_PORT = 5005 def run_test(): telnet_config = {'type': 'telnet', 'config':", "{'host': TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other 
stuff', 'do this", "TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other stuff', 'do this thing'],", "threading import api import usersim TCP_IP = 'localhost' TCP_PORT = 5005 def run_test():", "import api import usersim TCP_IP = 'localhost' TCP_PORT = 5005 def run_test(): telnet_config", "terms. import socket import threading import api import usersim TCP_IP = 'localhost' TCP_PORT", "= s.accept() print('Connection Address: ' + str(addr)) while True: data = conn.recv(20) if", "api import usersim TCP_IP = 'localhost' TCP_PORT = 5005 def run_test(): telnet_config =", "' + str(addr)) while True: data = conn.recv(20) if not data: break print('received", "'localhost' TCP_PORT = 5005 def run_test(): telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP,", "LICENSE.md file for terms. import socket import threading import api import usersim TCP_IP", "'password': 'password', 'commandlist': ['printstuff', 'do other stuff', 'do this thing'], 'port': TCP_PORT}} sim", "'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other stuff', 'do this thing'], 'port': TCP_PORT}}", "break print('received data: ' + str(data)) conn.send(data) conn.close() if __name__ == '__main__': run_test()", "s.listen(1) conn, addr = s.accept() print('Connection Address: ' + str(addr)) while True: data", "def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept()", "start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn, addr = s.accept() print('Connection", "= 5005 def run_test(): telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP, 'username': 'admin',", "addr = s.accept() print('Connection Address: ' + str(addr)) while True: data = conn.recv(20)", "['printstuff', 'do other stuff', 'do this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id", "= {'type': 'telnet', 'config': {'host': 
TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do", "import socket import threading import api import usersim TCP_IP = 'localhost' TCP_PORT =", "telnet_config = {'type': 'telnet', 'config': {'host': TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff',", "{'type': 'telnet', 'config': {'host': TCP_IP, 'username': 'admin', 'password': 'password', 'commandlist': ['printstuff', 'do other", "other stuff', 'do this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id = api.validate_config(telnet_config)", "conn, addr = s.accept() print('Connection Address: ' + str(addr)) while True: data =", "task_id = api.validate_config(telnet_config) def start_server(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((TCP_IP, TCP_PORT)) s.listen(1) conn,", "if not data: break print('received data: ' + str(data)) conn.send(data) conn.close() if __name__", "import threading import api import usersim TCP_IP = 'localhost' TCP_PORT = 5005 def", "<NAME>. See LICENSE.md file for terms. import socket import threading import api import", "'do other stuff', 'do this thing'], 'port': TCP_PORT}} sim = usersim.UserSim(True) task_id =", "while True: data = conn.recv(20) if not data: break print('received data: ' +" ]
[ "<filename>polichart/api/views.py # -*- coding: utf-8 -*- from flask import Blueprint, current_app, request, jsonify", "flask.ext.login import login_user, current_user, logout_user from ..extensions import db from polichart import models", "from ..extensions import db from polichart import models api = Blueprint('api', __name__, url_prefix='/api')", "Blueprint, current_app, request, jsonify from flask.ext.login import login_user, current_user, logout_user from ..extensions import", "current_user, logout_user from ..extensions import db from polichart import models api = Blueprint('api',", "jsonify from flask.ext.login import login_user, current_user, logout_user from ..extensions import db from polichart", "coding: utf-8 -*- from flask import Blueprint, current_app, request, jsonify from flask.ext.login import", "# -*- coding: utf-8 -*- from flask import Blueprint, current_app, request, jsonify from", "import Blueprint, current_app, request, jsonify from flask.ext.login import login_user, current_user, logout_user from ..extensions", "login_user, current_user, logout_user from ..extensions import db from polichart import models api =", "-*- coding: utf-8 -*- from flask import Blueprint, current_app, request, jsonify from flask.ext.login", "current_app, request, jsonify from flask.ext.login import login_user, current_user, logout_user from ..extensions import db", "flask import Blueprint, current_app, request, jsonify from flask.ext.login import login_user, current_user, logout_user from", "from flask import Blueprint, current_app, request, jsonify from flask.ext.login import login_user, current_user, logout_user", "utf-8 -*- from flask import Blueprint, current_app, request, jsonify from flask.ext.login import login_user,", "logout_user from ..extensions import db from polichart import models api = Blueprint('api', __name__,", "from flask.ext.login import login_user, current_user, logout_user from ..extensions import db from polichart import", "request, jsonify 
from flask.ext.login import login_user, current_user, logout_user from ..extensions import db from", "import login_user, current_user, logout_user from ..extensions import db from polichart import models api", "-*- from flask import Blueprint, current_app, request, jsonify from flask.ext.login import login_user, current_user," ]
[ "desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc:", "desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or", "desc: desc += f\" 〜 permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path", "return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url != \"/\" else \"\"),", "{service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\",", "ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service):", "pkg_resources from guillotina import app_settings from guillotina import configure from guillotina.api.service import Service", "{}) if swagger_conf.get(\"display_permission\", True): if desc: desc += f\" 〜 permission: {service_def['permission']}\" else:", "with open(index_file) as fi: html = fi.read() swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"]", "definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not", "self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc: desc +=", "app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc", 
"id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\",", "Interface from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\",", "name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data): if", "parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] =", "app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context):", "iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if", "{service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\",", "''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> '''", "def get_data(self, data): if callable(data): data = data(self.context) return data def load_swagger_info(self, api_def,", "definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)]", "ignore=True, ) async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file", "if 
swagger_conf.get(\"display_permission\", True): if desc: desc += f\" 〜 permission: {service_def['permission']}\" else: desc", "self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self,", "for sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path,", "\"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth =", "guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils", "service_def: trav_defs = service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path, sub_service_def in", "import get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces import", "base_path, api_def, tags=None): tags = tags or [] for method in iface_conf.keys(): if", "= os.path.join(here, \"index.html\") with open(index_file) as fi: html = fi.read() swagger_settings = app_settings[\"swagger\"]", ") async def __call__(self): user = get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy(", "= app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if", "api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form", "if method == \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( 
iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def,", "path.rstrip(\"/\") if path not in api_def: api_def[path or \"/\"] = {} desc =", "async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here,", "data): if callable(data): data = data(self.context) return data def load_swagger_info(self, api_def, path, method,", "\"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def", "os.path.join(path, sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info( api_def, path, method, tags, service_def", "vhm: parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path", "from guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface import Interface from", "parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not in definition['info']:", "= get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url", "= get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf =", "trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def,", "async def __call__(self): user = get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy( 
app_settings[\"swagger\"][\"base_configuration\"]", "or [] for method in iface_conf.keys(): if method == \"endpoints\": for name in", "<div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface,", "import IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils import", "user = get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm =", "continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if", "path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\" ]", "= { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])),", ") else: self.load_swagger_info( api_def, path, method, tags, service_def ) async def __call__(self): user", "== \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], )", ").version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface =", "import get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils import", "zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, )", "vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url 
= urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] =", "request.headers.get( \"X-VirtualHost-Monster\" ) if url is None: try: url = getMultiAdapter((context, request), IAbsoluteURL)()", "if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context ):", "[parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version'", "self.load_swagger_info( api_def, path, method, tags, service_def ) async def __call__(self): user = get_authenticated_user()", "\"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else:", "from guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy from", "\"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\",", "True): if desc: desc += f\" 〜 permission: {service_def['permission']}\" else: desc += f\"permission:", "tags or [] for method in iface_conf.keys(): if method == \"endpoints\": for name", "name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\": continue service_def = iface_conf[method]", "= app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a", "swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth = '' return html.format( app_settings=app_settings, request=request, 
swagger_settings=json.dumps(swagger_settings),", "= tags or [] for method in iface_conf.keys(): if method == \"endpoints\": for", "import copy import json import os from urllib.parse import urlparse import pkg_resources from", "= os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ =", "method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self,", ") if url is None: try: url = getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError:", "swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url is", "request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth = ''", "iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML =", "if desc: desc += f\" 〜 permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\"", "self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info( api_def, path, method,", "= app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url is None:", "= service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"]", "[\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", 
[])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\":", "get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url =", "import getMultiAdapter from guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils import", "get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface import Interface", "iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\":", "= get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\")", "guillotina import configure from guillotina.api.service import Service from guillotina.component import getMultiAdapter from guillotina.interfaces", "index_file = os.path.join(here, \"index.html\") with open(index_file) as fi: html = fi.read() swagger_settings =", "\"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\",", "api_def, tags=None): tags = tags or [] for method in iface_conf.keys(): if method", "api_def, path, method, tags, service_def ) async def __call__(self): user = get_authenticated_user() self.policy", "sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def, )", "= iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or 
swagger_conf.get(\"ignore\")): continue", "auth = '' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url !=", "api_def, path, method, tags, service_def): path = path.rstrip(\"/\") if path not in api_def:", "return data def load_swagger_info(self, api_def, path, method, tags, service_def): path = path.rstrip(\"/\") if", "method == \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")],", "swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url is None: try: url = getMultiAdapter((context,", "= AUTH_HTML else: auth = '' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url", "else: self.load_swagger_info( api_def, path, method, tags, service_def ) async def __call__(self): user =", "[])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf,", "= pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in", "tags=None): tags = tags or [] for method in iface_conf.keys(): if method ==", "'' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url != \"/\" else", "service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info(", "guillotina.api.service import Service from guillotina.component import getMultiAdapter from guillotina.interfaces 
import IAbsoluteURL from guillotina.utils", "here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__", "[\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs", "\"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc: desc += f\"", "= self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme]", "fi: html = fi.read() swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\"", "service_def[\"permission\"], self.context ): continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path =", "desc += f\" 〜 permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path or", "api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface)", "== \"options\": continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or", "permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data): if callable(data):", "tags = tags or [] for method in iface_conf.keys(): if method == \"endpoints\":", "[] for method in iface_conf.keys(): if method == \"endpoints\": for name in iface_conf[\"endpoints\"]:", "import Service from guillotina.component import getMultiAdapter from guillotina.interfaces import IAbsoluteURL from guillotina.utils import", "= 
path.rstrip(\"/\") if path not in api_def: api_def[path or \"/\"] = {} desc", "get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if", "class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data): if callable(data): data = data(self.context)", "\"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\",", "= \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth", "iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML", "self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path", "<form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service(", "app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\"", "json import os from urllib.parse import urlparse import pkg_resources from guillotina import app_settings", "{} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if", "iface_conf, base_path, api_def, tags=None): tags = tags or [] for method in iface_conf.keys():", "request): if 
app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with open(index_file)", "(service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue", "in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower() ==", "Service from guillotina.component import getMultiAdapter from guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user", "service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc: desc += f\" 〜 permission: {service_def['permission']}\"", "swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")),", "= url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth = '' return html.format(", "IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme", ") async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file =", "from guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name from", "pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in api_defs.keys():", "for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], 
os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if", "path not in api_def: api_def[path or \"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\"))", "= service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission(", "swagger_conf.get(\"display_permission\", True): if desc: desc += f\" 〜 permission: {service_def['permission']}\" else: desc +=", "sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info( api_def, path, method, tags, service_def )", "get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy", "class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, )", "getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if", "<div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\",", "definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface", "</form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request):", "swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path in [\"\"]", "definition = copy.deepcopy( 
app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm)", "service_def): path = path.rstrip(\"/\") if path not in api_def: api_def[path or \"/\"] =", "api_def: api_def[path or \"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\",", "sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path),", "[get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs =", "tags, service_def): path = path.rstrip(\"/\") if path not in api_def: api_def[path or \"/\"]", "= parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"]", "in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags,", "if callable(data): data = data(self.context) return data def load_swagger_info(self, api_def, path, method, tags,", "\"index.html\") with open(index_file) as fi: html = fi.read() swagger_settings = app_settings[\"swagger\"] url =", "url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url is None: try: url", "in api_def: api_def[path or \"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf =", "context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file =", "url = getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), 
request.host) swagger_settings[\"initial_swagger_url\"] =", "method, tags, service_def): path = path.rstrip(\"/\") if path not in api_def: api_def[path or", "= app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with open(index_file) as fi: html =", "from guillotina import app_settings from guillotina import configure from guillotina.api.service import Service from", "method.lower() == \"options\": continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\")", "get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags = tags or [] for method in", "@configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'):", "if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition", "api_def, tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\": continue service_def = iface_conf[method] swagger_conf", "from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True,", "get_data(self, data): if callable(data): data = data(self.context) return data def load_swagger_info(self, api_def, path,", "app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with open(index_file) as fi:", "sub_service_def, ) else: self.load_swagger_info( api_def, path, method, tags, service_def ) async def __call__(self):", "app_settings from guillotina import configure from guillotina.api.service import 
Service from guillotina.component import getMultiAdapter", "else: auth = '' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url", "definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host", "os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True", "in iface_conf.keys(): if method == \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path,", "service_def ) async def __call__(self): user = get_authenticated_user() self.policy = get_security_policy(user) definition =", "get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces import ComponentLookupError", "context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data):", "not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path =", "[]): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\"", "in [\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def:", "self.policy = get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm:", 
"for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\"", "data def load_swagger_info(self, api_def, path, method, tags, service_def): path = path.rstrip(\"/\") if path", "+ swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs =", "= True def get_data(self, data): if callable(data): data = data(self.context) return data def", "= urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"]", "isinstance(trav_defs, dict): for sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info(", "path, method, tags, service_def ) async def __call__(self): user = get_authenticated_user() self.policy =", "import os from urllib.parse import urlparse import pkg_resources from guillotina import app_settings from", "= swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url is None: try: url =", "name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower()", "True def get_data(self, data): if callable(data): data = data(self.context) return data def load_swagger_info(self,", "configure from guillotina.api.service import Service from guillotina.component import getMultiAdapter from guillotina.interfaces import IAbsoluteURL", "self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path, api_def,", "request), IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) 
swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']:", "api_def, os.path.join(path, sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info( api_def, path, method, tags,", "</div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def", "html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url != \"/\" else \"\"), auth=auth,", "iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] =", "or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for", "def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags = tags or [] for method", "get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name", ") vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"]", "app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url is None: try:", "name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html']", "api_def[path or \"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {})", "load_swagger_info(self, api_def, path, method, tags, service_def): path = 
path.rstrip(\"/\") if path not in", "os from urllib.parse import urlparse import pkg_resources from guillotina import app_settings from guillotina", "'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path", "import urlparse import pkg_resources from guillotina import app_settings from guillotina import configure from", "AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div>", "swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth = '' return", "import json import os from urllib.parse import urlparse import pkg_resources from guillotina import", "\"options\": continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\")", "\"X-VirtualHost-Monster\" ) if url is None: try: url = getMultiAdapter((context, request), IAbsoluteURL)() except", "guillotina.utils import resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces import ComponentLookupError here =", "def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\")", "desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags =", "SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data): if callable(data): data = data(self.context) return", "\"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags", "= fi.read() swagger_settings = app_settings[\"swagger\"] url = 
swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if", "= {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True):", "api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})),", "urllib.parse import urlparse import pkg_resources from guillotina import app_settings from guillotina import configure", "else: index_file = os.path.join(here, \"index.html\") with open(index_file) as fi: html = fi.read() swagger_settings", "sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in", "if isinstance(trav_defs, dict): for sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ]", "IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth", "definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for", "not in api_def: api_def[path or \"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf", "guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils", "\"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface", "auth = AUTH_HTML else: auth = '' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url,", 
"trav_defs = service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path, sub_service_def in trav_defs.items():", "from guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path from", "fi.read() swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" ) if url", "definition AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div>", "self.context ): continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path,", "os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs,", ") class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data): if callable(data): data =", "request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url != \"/\" else \"\"), auth=auth, title=swagger_settings['base_configuration']['info']['title'] )", "service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] =", "f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\":", "in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"])", "<a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True,", "{})), 
\"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), }", "if \"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for", "open(index_file) as fi: html = fi.read() swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or", "\"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info( api_def,", "〜 permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = {", "from guillotina.component import getMultiAdapter from guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user from", "copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm) definition[\"host\"] =", "for method in iface_conf.keys(): if method == \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints(", "swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs = service_def[", "\"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path, api_def, tags=None):", "not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []):", "= '' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url != \"/\"", "try: url = getMultiAdapter((context, request), 
IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"]", "= copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm = self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm) definition[\"host\"]", "zope.interface import Interface from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface,", "index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with open(index_file) as fi: html", "parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else:", "resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service(", "urlparse import pkg_resources from guillotina import app_settings from guillotina import configure from guillotina.api.service", "\"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags = tags", "guillotina.component import getMultiAdapter from guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils", "from zope.interface import Interface from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\",", "callable(data): data = data(self.context) return data def load_swagger_info(self, api_def, path, method, tags, service_def):", "if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with open(index_file) as", "path = 
get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf", "id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\",", "if url is None: try: url = getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url", "from urllib.parse import urlparse import pkg_resources from guillotina import app_settings from guillotina import", "method in iface_conf.keys(): if method == \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name],", "data = data(self.context) return data def load_swagger_info(self, api_def, path, method, tags, service_def): path", "''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request): if", "definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"] =", "resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return", "= [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if", "in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"] path = get_full_content_path(self.context)", "{})), } def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags = tags or []", "import ComponentLookupError here = 
os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class", "method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file", "dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path,", "self.request.headers.get(\"X-VirtualHost-Monster\") if vhm: parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"]", "else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"]", "swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not", "html = fi.read() swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get( \"X-VirtualHost-Monster\" )", "from guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme from", "None: try: url = getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host)", "= getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url", "\"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path,", 
"data(self.context) return data def load_swagger_info(self, api_def, path, method, tags, service_def): path = path.rstrip(\"/\")", "ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth = AUTH_HTML", "sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method,", "tags, sub_service_def, ) else: self.load_swagger_info( api_def, path, method, tags, service_def ) async def", "definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\">", "or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\":", "continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or", "if path not in api_def: api_def[path or \"/\"] = {} desc = self.get_data(service_def.get(\"description\",", "service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"],", "for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf,", "</div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context,", "= service_def[ \"permission\" ] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def, ) else:", "= self.request.host 
definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution(", "self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form id='api_selector'>", "f\" 〜 permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] =", "os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\": continue service_def =", "= os.path.join(base_path, sub_path) if \"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\" ] if", "method, tags, service_def ) async def __call__(self): user = get_authenticated_user() self.policy = get_security_policy(user)", "] self.load_swagger_info( api_def, os.path.join(path, sub_path), method, tags, sub_service_def, ) else: self.load_swagger_info( api_def, path,", "ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True def get_data(self, data): if callable(data): data", "or request.headers.get( \"X-VirtualHost-Monster\" ) if url is None: try: url = getMultiAdapter((context, request),", "app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if url != \"/\" else \"\"), auth=auth, title=swagger_settings['base_configuration']['info']['title']", "): continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\", []): path = os.path.join(base_path, sub_path)", "import configure from guillotina.api.service import Service from guillotina.component import getMultiAdapter from guillotina.interfaces import", "get_full_content_path(self.context) for dotted_iface in api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if 
iface.providedBy(self.context): iface_conf = api_defs[dotted_iface]", "= self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc: desc", "import get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface import", "guillotina.utils import get_request_scheme from guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface", "getMultiAdapter from guillotina.interfaces import IAbsoluteURL from guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path", "guillotina import app_settings from guillotina import configure from guillotina.api.service import Service from guillotina.component", "dict): for sub_path, sub_service_def in trav_defs.items(): sub_service_def[\"permission\"] = service_def[ \"permission\" ] self.load_swagger_info( api_def,", "self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})),", "__call__(self): user = get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] ) vhm", "def __call__(self): user = get_authenticated_user() self.policy = get_security_policy(user) definition = copy.deepcopy( app_settings[\"swagger\"][\"base_configuration\"] )", "method, tags, sub_service_def, ) else: self.load_swagger_info( api_def, path, method, tags, service_def ) async", "} def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags = tags or [] for", "path = path.rstrip(\"/\") if path not in api_def: api_def[path or \"/\"] = {}", "tags, service_def ) async def __call__(self): user = 
get_authenticated_user() self.policy = get_security_policy(user) definition", "if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth = '' return html.format( app_settings=app_settings, request=request,", "api_defs.keys(): iface = resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"]", ") else: if method.lower() == \"options\": continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\",", "os.path.join(here, \"index.html\") with open(index_file) as fi: html = fi.read() swagger_settings = app_settings[\"swagger\"] url", "AUTH_HTML else: auth = '' return html.format( app_settings=app_settings, request=request, swagger_settings=json.dumps(swagger_settings), base_url=url, static_url=\"{}/swagger_static/\".format(url if", "guillotina.utils import get_security_policy from guillotina.utils import resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces", "render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with", "swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc: desc += f\" 〜", "tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\":", "if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path in [\"\"] + swagger_conf.get(\"extra_paths\",", "from guillotina.utils import resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces import ComponentLookupError here", "path, definition[\"paths\"]) 
definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form id='api_selector'> <div", "from guillotina.api.service import Service from guillotina.component import getMultiAdapter from guillotina.interfaces import IAbsoluteURL from", "= resolve_dotted_name(dotted_iface) if iface.providedBy(self.context): iface_conf = api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"]", "= [get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs", "\"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\",", "except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth =", "self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\": continue", "= service_def.get(\"swagger\", {}) if swagger_conf.get(\"display_permission\", True): if desc: desc += f\" 〜 permission:", "urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] = parsed_url.path else: definition[\"host\"] =", "= data(self.context) return data def load_swagger_info(self, api_def, path, method, tags, service_def): path =", "def load_swagger_info(self, api_def, path, method, tags, service_def): path = path.rstrip(\"/\") if path not", "url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else: auth = '' return html.format( app_settings=app_settings,", "] if isinstance(trav_defs, dict): for sub_path, sub_service_def in trav_defs.items(): 
sub_service_def[\"permission\"] = service_def[ \"permission\"", "app_settings['swagger']['index_html'] else: index_file = os.path.join(here, \"index.html\") with open(index_file) as fi: html = fi.read()", "{ \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\":", "self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\"", "if method.lower() == \"options\": continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if", "tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\": continue service_def = iface_conf[method] swagger_conf =", "import app_settings from guillotina import configure from guillotina.api.service import Service from guillotina.component import", "if 'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version api_defs = app_settings[\"api_definition\"]", "as fi: html = fi.read() swagger_settings = app_settings[\"swagger\"] url = swagger_settings[\"base_url\"] or request.headers.get(", "permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\":", "iface_conf[\"endpoints\"][name], os.path.join(base_path, name), api_def, tags=[name.strip(\"@\")], ) else: if method.lower() == \"options\": continue service_def", "from guillotina import configure from guillotina.api.service import Service from guillotina.component import getMultiAdapter from", "service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path", "definition[\"schemes\"] = 
[get_request_scheme(self.request)] if 'version' not in definition['info']: definition[\"info\"][\"version\"] = pkg_resources.get_distribution( \"guillotina\" ).version", "else: if method.lower() == \"options\": continue service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {})", "\"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc, \"responses\": self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path,", "import pkg_resources from guillotina import app_settings from guillotina import configure from guillotina.api.service import", "if vhm: parsed_url = urlparse(vhm) definition[\"host\"] = parsed_url.netloc definition[\"schemes\"] = [parsed_url.scheme] definition[\"basePath\"] =", "else: desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"])", "+= f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()] = { \"tags\": swagger_conf.get(\"tags\", [\"\"]) or tags,", "path, method, tags, service_def): path = path.rstrip(\"/\") if path not in api_def: api_def[path", "import resolve_dotted_name from zope.interface import Interface from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__))", "is None: try: url = getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url = \"{}://{}\".format(get_request_scheme(request),", "= api_defs[dotted_iface] self.get_endpoints(iface_conf, path, definition[\"paths\"]) definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = '''", "url is None: try: url = getMultiAdapter((context, request), IAbsoluteURL)() except ComponentLookupError: url =", "or \"/\"] = {} desc = self.get_data(service_def.get(\"description\", \"\")) swagger_conf = service_def.get(\"swagger\", {}) if", "in service_def: trav_defs = 
service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path, sub_service_def", "import get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils import", "+= f\" 〜 permission: {service_def['permission']}\" else: desc += f\"permission: {service_def['permission']}\" api_def[path or \"/\"][method.lower()]", "{}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context", "service_def = iface_conf[method] swagger_conf = service_def.get(\"swagger\", {}) if (service_def.get(\"ignore\") or service_def.get(\"swagger_ignore\") or swagger_conf.get(\"ignore\")):", "= ''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a> </div> </div> </form>", "url = \"{}://{}\".format(get_request_scheme(request), request.host) swagger_settings[\"initial_swagger_url\"] = url if swagger_settings['authentication_allowed']: auth = AUTH_HTML else:", "copy import json import os from urllib.parse import urlparse import pkg_resources from guillotina", "@configure.service( method=\"GET\", context=Interface, name=\"@swagger\", permission=\"guillotina.swagger.View\", ignore=True, ) class SwaggerDefinitionService(Service): __allow_access__ = True def", "or tags, \"parameters\": self.get_data(service_def.get(\"parameters\", {})), \"produces\": self.get_data(service_def.get(\"produces\", [])), \"summary\": self.get_data(service_def.get(\"summary\", \"\")), \"description\": desc,", "permission=\"guillotina.swagger.View\", ignore=True, ) async def render_docs_index(context, request): if app_settings['swagger'].get('index_html'): index_file = app_settings['swagger']['index_html'] else:", "\"traversed_service_definitions\" ] if isinstance(trav_defs, dict): for sub_path, sub_service_def in 
trav_defs.items(): sub_service_def[\"permission\"] = service_def[", "or swagger_conf.get(\"ignore\")): continue if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path in", "return definition AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\"> <div> <a class=\"authorize__btn\" href=\"#\">Authorize</a>", "__allow_access__ = True def get_data(self, data): if callable(data): data = data(self.context) return data", "definition[\"definitions\"] = app_settings[\"json_schema_definitions\"] return definition AUTH_HTML = ''' <form id='api_selector'> <div id=\"auth_container\"> <div>", "sub_path) if \"traversed_service_definitions\" in service_def: trav_defs = service_def[ \"traversed_service_definitions\" ] if isinstance(trav_defs, dict):", "continue if not self.policy.check_permission( service_def[\"permission\"], self.context ): continue for sub_path in [\"\"] +", "iface_conf.keys(): if method == \"endpoints\": for name in iface_conf[\"endpoints\"]: self.get_endpoints( iface_conf[\"endpoints\"][name], os.path.join(base_path, name),", "guillotina.utils import get_authenticated_user from guillotina.utils import get_full_content_path from guillotina.utils import get_request_scheme from guillotina.utils", "import Interface from zope.interface.interfaces import ComponentLookupError here = os.path.dirname(os.path.realpath(__file__)) @configure.service( method=\"GET\", context=Interface, name=\"@swagger\",", "self.get_data(service_def.get(\"responses\", {})), } def get_endpoints(self, iface_conf, base_path, api_def, tags=None): tags = tags or", "= parsed_url.path else: definition[\"host\"] = self.request.host definition[\"schemes\"] = [get_request_scheme(self.request)] if 'version' not in", "href=\"#\">Authorize</a> </div> </div> </form> ''' @configure.service( method=\"GET\", context=Interface, name=\"@docs\", permission=\"guillotina.swagger.View\", ignore=True, ) async" ]